From 87c4bba25dd47799d6b6be329bed38b786469587 Mon Sep 17 00:00:00 2001 From: David Benque Date: Mon, 14 Nov 2022 12:30:07 +0100 Subject: [PATCH 01/35] recommendation post processor for integer CPU --- .../pkg/recommender/main.go | 9 + .../routines/cpu_integer_post_processor.go | 89 ++++++++++ .../cpu_integer_post_processor_test.go | 160 ++++++++++++++++++ 3 files changed, 258 insertions(+) create mode 100644 vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go create mode 100644 vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index 80743fd7cf2d..8a38b746fa01 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -67,6 +67,12 @@ var ( cpuHistogramDecayHalfLife = flag.Duration("cpu-histogram-decay-half-life", model.DefaultCPUHistogramDecayHalfLife, `The amount of time it takes a historical CPU usage sample to lose half of its weight.`) ) +// Post processors flags +var ( + // CPU as integer to benefit for CPU management Static Policy ( https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy ) + postProcessorCPUasInteger = flag.Bool("cpu-integer-post-processor-enabled", false, "Enable the cpu-integer recommendation post processor. Only containers that specify it can have their CPU recommendation rounded up to an integer value.") +) + func main() { klog.InitFlags(nil) kube_flag.InitFlags() @@ -86,6 +92,9 @@ func main() { postProcessors := []routines.RecommendationPostProcessor{ &routines.CappingPostProcessor{}, } + if *postProcessorCPUasInteger { + postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{}) + } recommender := routines.NewRecommender(config, *checkpointsGCInterval, useCheckpoints, *vpaObjectNamespace, *recommenderName, postProcessors) promQueryTimeout, err := time.ParseDuration(*queryTimeout) diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go new file mode 100644 index 000000000000..ae2c2b779631 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go @@ -0,0 +1,89 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package routines + +import ( + apiv1 "k8s.io/api/core/v1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" + "math" + "strings" +) + +// IntegerCPUPostProcessor ensures that the recommendation delivers an integer value for CPU +// This is need for users who want to use CPU Management with static policy: https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy +type IntegerCPUPostProcessor struct{} + +const ( + // The user interface for that post processor is an annotation on the VPA object with the following format: + // vpa-post-processor.kubernetes.io/{containerName}_integerCPU=true + vpaPostProcessorPrefix = "vpa-post-processor.kubernetes.io/" + vpaPostProcessorIntegerCPUSuffix = "_integerCPU" + vpaPostProcessorIntegerCPUValue = "true" +) + +var _ RecommendationPostProcessor = &IntegerCPUPostProcessor{} + +// Process apply the capping post-processing to the recommendation. +// For this post processor the CPU value is rounded up to an integer +func (p *IntegerCPUPostProcessor) Process(vpa *model.Vpa, recommendation *vpa_types.RecommendedPodResources, policy *vpa_types.PodResourcePolicy) *vpa_types.RecommendedPodResources { + + amendedRecommendation := recommendation.DeepCopy() + + process := func(recommendation apiv1.ResourceList) { + for resourceName, recommended := range recommendation { + if resourceName != apiv1.ResourceCPU { + continue + } + v := float64(recommended.MilliValue()) / 1000 + r := int64(math.Ceil(v)) + recommended.Set(r) + recommendation[resourceName] = recommended + } + } + + for key, value := range vpa.Annotations { + containerName := extractContainerName(key, vpaPostProcessorPrefix, vpaPostProcessorIntegerCPUSuffix) + if containerName == "" || value != vpaPostProcessorIntegerCPUValue { + continue + } + + for _, r := range amendedRecommendation.ContainerRecommendations { + if r.ContainerName != containerName { + continue + } + process(r.Target) + process(r.LowerBound) + process(r.UpperBound) + process(r.UncappedTarget) + } + } + return amendedRecommendation +} + +// extractContainerName return the container name for the feature based on annotation key +// if the returned value is empty that means that the key does not match +func extractContainerName(key, prefix, suffix string) string { + if !strings.HasPrefix(key, prefix) { + return "" + } + if !strings.HasSuffix(key, suffix) { + return "" + } + + return key[len(prefix) : len(key)-len(suffix)] +} diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go new file mode 100644 index 000000000000..5923eefa5c45 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package routines + +import ( + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" + "testing" +) + +func Test_extractContainerName(t *testing.T) { + tests := []struct { + name string + key string + prefix string + suffix string + want string + }{ + { + name: "empty", + key: "", + prefix: "", + suffix: "", + want: "", + }, + { + name: "no match", + key: "abc", + prefix: "z", + suffix: "x", + want: "", + }, + { + name: "match", + key: "abc", + prefix: "a", + suffix: "c", + want: "b", + }, + { + name: "real", + key: vpaPostProcessorPrefix + "kafka" + vpaPostProcessorIntegerCPUSuffix, + prefix: vpaPostProcessorPrefix, + suffix: vpaPostProcessorIntegerCPUSuffix, + want: "kafka", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, extractContainerName(tt.key, tt.prefix, tt.suffix), "extractContainerName(%v, %v, %v)", tt.key, tt.prefix, tt.suffix) + }) + } +} + +func Test_integerCPUPostProcessor_Process(t *testing.T) { + tests := []struct { + name string + vpa *model.Vpa + recommendation *vpa_types.RecommendedPodResources + want *vpa_types.RecommendedPodResources + }{ + { + name: "No containers match", + vpa: &model.Vpa{Annotations: map[string]string{ + vpaPostProcessorPrefix + "container-other" + vpaPostProcessorIntegerCPUSuffix: "true", + }}, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("container1").WithTarget("8.6", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("8.2", "300Mi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("container1").WithTarget("8.6", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("8.2", "300Mi").GetContainerResources(), + }, + }, + }, + { + name: "2 containers, 1 matching only", + vpa: &model.Vpa{Annotations: map[string]string{ + vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: "true", + }}, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("container1").WithTarget("8.6", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("8.2", "300Mi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("container1").WithTarget("9", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("8.2", "300Mi").GetContainerResources(), + }, + }, + }, + { + name: "2 containers, 2 matching", + vpa: &model.Vpa{Annotations: map[string]string{ + vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: "true", + vpaPostProcessorPrefix + "container2" + vpaPostProcessorIntegerCPUSuffix: "true", + }}, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + 
test.Recommendation().WithContainer("container1").WithTarget("8.6", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("5.2", "300Mi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("container1").WithTarget("9", "200Mi").GetContainerResources(), + test.Recommendation().WithContainer("container2").WithTarget("6", "300Mi").GetContainerResources(), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := IntegerCPUPostProcessor{} + got := c.Process(tt.vpa, tt.recommendation, nil) + assert.Equalf(t, reScale3(tt.want), reScale3(got), "Process(%v, %v, nil)", tt.vpa, tt.recommendation) + }) + } +} + +func reScale3(recommended *vpa_types.RecommendedPodResources) *vpa_types.RecommendedPodResources { + + scale3 := func(rl v1.ResourceList) { + for k, v := range rl { + v.SetMilli(v.MilliValue()) + rl[k] = v + } + } + + for _, r := range recommended.ContainerRecommendations { + scale3(r.LowerBound) + scale3(r.Target) + scale3(r.UncappedTarget) + scale3(r.UpperBound) + } + return recommended +} From 9653f6e4087071cef407d657b5cb9b43fca7e01b Mon Sep 17 00:00:00 2001 From: David Benque Date: Tue, 22 Nov 2022 16:29:53 +0100 Subject: [PATCH 02/35] update flag cpu-integer-post-processor-enabled description --- vertical-pod-autoscaler/pkg/recommender/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index 8a38b746fa01..2ce3cec19d8f 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -70,7 +70,7 @@ var ( // Post processors flags var ( // CPU as integer to benefit for CPU management Static Policy ( https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy ) - postProcessorCPUasInteger = flag.Bool("cpu-integer-post-processor-enabled", false, "Enable the cpu-integer recommendation post processor. Only containers that specify it can have their CPU recommendation rounded up to an integer value.") + postProcessorCPUasInteger = flag.Bool("cpu-integer-post-processor-enabled", false, "Enable the cpu-integer recommendation post processor. 
The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental)") ) func main() { From 8f3412d7ae95b7172e5006aebe151ec06da98513 Mon Sep 17 00:00:00 2001 From: David Benque Date: Thu, 24 Nov 2022 14:43:22 +0100 Subject: [PATCH 03/35] directly comparing recommendation with rescaling the values --- .../routines/cpu_integer_post_processor.go | 32 +++---- .../cpu_integer_post_processor_test.go | 96 ++++++++++++++++--- 2 files changed, 99 insertions(+), 29 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go index ae2c2b779631..2c221269c79a 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor.go @@ -18,9 +18,9 @@ package routines import ( apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" - "math" "strings" ) @@ -44,18 +44,6 @@ func (p *IntegerCPUPostProcessor) Process(vpa *model.Vpa, recommendation *vpa_ty amendedRecommendation := recommendation.DeepCopy() - process := func(recommendation apiv1.ResourceList) { - for resourceName, recommended := range recommendation { - if resourceName != apiv1.ResourceCPU { - continue - } - v := float64(recommended.MilliValue()) / 1000 - r := int64(math.Ceil(v)) - recommended.Set(r) - recommendation[resourceName] = recommended - } - } - for key, value := range vpa.Annotations { containerName := extractContainerName(key, vpaPostProcessorPrefix, vpaPostProcessorIntegerCPUSuffix) if containerName == "" || value != vpaPostProcessorIntegerCPUValue { @@ -66,15 +54,25 @@ func (p *IntegerCPUPostProcessor) Process(vpa *model.Vpa, recommendation *vpa_ty if r.ContainerName != containerName { continue } - process(r.Target) - process(r.LowerBound) - process(r.UpperBound) - process(r.UncappedTarget) + setIntegerCPURecommendation(r.Target) + setIntegerCPURecommendation(r.LowerBound) + setIntegerCPURecommendation(r.UpperBound) + setIntegerCPURecommendation(r.UncappedTarget) } } return amendedRecommendation } +func setIntegerCPURecommendation(recommendation apiv1.ResourceList) { + for resourceName, recommended := range recommendation { + if resourceName != apiv1.ResourceCPU { + continue + } + recommended.RoundUp(resource.Scale(0)) + recommendation[resourceName] = recommended + } +} + // extractContainerName return the container name for the feature based on annotation key // if the returned value is empty that means that the key does not match func extractContainerName(key, prefix, suffix string) string { diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go index 5923eefa5c45..b3039ecba105 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go @@ -19,6 +19,7 @@ package routines import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" 
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" @@ -136,25 +137,96 @@ func Test_integerCPUPostProcessor_Process(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := IntegerCPUPostProcessor{} got := c.Process(tt.vpa, tt.recommendation, nil) - assert.Equalf(t, reScale3(tt.want), reScale3(got), "Process(%v, %v, nil)", tt.vpa, tt.recommendation) + assert.True(t, equalRecommendedPodResources(tt.want, got), "Process(%v, %v, nil)", tt.vpa, tt.recommendation) }) } } -func reScale3(recommended *vpa_types.RecommendedPodResources) *vpa_types.RecommendedPodResources { +func equalRecommendedPodResources(a, b *vpa_types.RecommendedPodResources) bool { + if len(a.ContainerRecommendations) != len(b.ContainerRecommendations) { + return false + } - scale3 := func(rl v1.ResourceList) { - for k, v := range rl { - v.SetMilli(v.MilliValue()) - rl[k] = v + for i := range a.ContainerRecommendations { + if !equalResourceList(a.ContainerRecommendations[i].LowerBound, b.ContainerRecommendations[i].LowerBound) { + return false + } + if !equalResourceList(a.ContainerRecommendations[i].Target, b.ContainerRecommendations[i].Target) { + return false + } + if !equalResourceList(a.ContainerRecommendations[i].UncappedTarget, b.ContainerRecommendations[i].UncappedTarget) { + return false + } + if !equalResourceList(a.ContainerRecommendations[i].UpperBound, b.ContainerRecommendations[i].UpperBound) { + return false } } + return true +} - for _, r := range recommended.ContainerRecommendations { - scale3(r.LowerBound) - scale3(r.Target) - scale3(r.UncappedTarget) - scale3(r.UpperBound) +func equalResourceList(rla, rlb v1.ResourceList) bool { + if len(rla) != len(rlb) { + return false + } + for k := range rla { + q := rla[k] + if q.Cmp(rlb[k]) != 0 { + return false + } + } + for k := range rlb { + q := rlb[k] + if q.Cmp(rla[k]) != 0 { + return false + } + } + return true +} + +func Test_setIntegerCPURecommendation(t *testing.T) { + tests := []struct { + name string + recommendation v1.ResourceList + expectedRecommendation v1.ResourceList + }{ + { + name: "unchanged", + recommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + expectedRecommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + }, + { + name: "round up from 0.1", + recommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("8.1"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + expectedRecommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("9"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + }, + { + name: "round up from 0.9", + recommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("8.9"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + expectedRecommendation: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("9"), + v1.ResourceMemory: resource.MustParse("6Gi"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setIntegerCPURecommendation(tt.recommendation) + assert.True(t, equalResourceList(tt.recommendation, tt.expectedRecommendation)) + }) } - return recommended } From 13f8eeedbd360a236fdedd0f42ef78280579c25f Mon Sep 17 00:00:00 2001 From: David Benque Date: Thu, 24 Nov 2022 15:17:36 +0100 Subject: [PATCH 04/35] 
Capping post-processor should always be the last one --- vertical-pod-autoscaler/pkg/recommender/main.go | 7 ++++--- .../routines/cpu_integer_post_processor_test.go | 14 +++++++------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index 2ce3cec19d8f..8146e4074236 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -89,12 +89,13 @@ func main() { useCheckpoints := *storage != "prometheus" - postProcessors := []routines.RecommendationPostProcessor{ - &routines.CappingPostProcessor{}, - } + var postProcessors []routines.RecommendationPostProcessor if *postProcessorCPUasInteger { postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{}) } + // CappingPostProcessor, should always come in the last position for post-processing + postProcessors = append(postProcessors, &routines.CappingPostProcessor{}) + recommender := routines.NewRecommender(config, *checkpointsGCInterval, useCheckpoints, *vpaObjectNamespace, *recommenderName, postProcessors) promQueryTimeout, err := time.ParseDuration(*queryTimeout) diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go index b3039ecba105..312baac8d4b5 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go @@ -26,7 +26,7 @@ import ( "testing" ) -func Test_extractContainerName(t *testing.T) { +func TestExtractContainerName(t *testing.T) { tests := []struct { name string key string @@ -70,7 +70,7 @@ func Test_extractContainerName(t *testing.T) { } } -func Test_integerCPUPostProcessor_Process(t *testing.T) { +func TestIntegerCPUPostProcessor_Process(t *testing.T) { tests := []struct { name string vpa *model.Vpa @@ -80,7 +80,7 @@ func Test_integerCPUPostProcessor_Process(t *testing.T) { { name: "No containers match", vpa: &model.Vpa{Annotations: map[string]string{ - vpaPostProcessorPrefix + "container-other" + vpaPostProcessorIntegerCPUSuffix: "true", + vpaPostProcessorPrefix + "container-other" + vpaPostProcessorIntegerCPUSuffix: vpaPostProcessorIntegerCPUValue, }}, recommendation: &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{ @@ -98,7 +98,7 @@ func Test_integerCPUPostProcessor_Process(t *testing.T) { { name: "2 containers, 1 matching only", vpa: &model.Vpa{Annotations: map[string]string{ - vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: "true", + vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: vpaPostProcessorIntegerCPUValue, }}, recommendation: &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{ @@ -116,8 +116,8 @@ func Test_integerCPUPostProcessor_Process(t *testing.T) { { name: "2 containers, 2 matching", vpa: &model.Vpa{Annotations: map[string]string{ - vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: "true", - vpaPostProcessorPrefix + "container2" + vpaPostProcessorIntegerCPUSuffix: "true", + vpaPostProcessorPrefix + "container1" + vpaPostProcessorIntegerCPUSuffix: vpaPostProcessorIntegerCPUValue, + vpaPostProcessorPrefix + "container2" + vpaPostProcessorIntegerCPUSuffix: vpaPostProcessorIntegerCPUValue, }}, 
recommendation: &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{ @@ -183,7 +183,7 @@ func equalResourceList(rla, rlb v1.ResourceList) bool { return true } -func Test_setIntegerCPURecommendation(t *testing.T) { +func TestSetIntegerCPURecommendation(t *testing.T) { tests := []struct { name string recommendation v1.ResourceList From 17ef62801340072d7c551b05a869f3d2bedfd6a0 Mon Sep 17 00:00:00 2001 From: David Benque Date: Fri, 2 Dec 2022 09:18:40 -0500 Subject: [PATCH 05/35] add documentation for CPU integer post processor --- vertical-pod-autoscaler/FAQ.md | 1 + vertical-pod-autoscaler/README.md | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/vertical-pod-autoscaler/FAQ.md b/vertical-pod-autoscaler/FAQ.md index a26654e0b1d9..9ab83ceb535a 100644 --- a/vertical-pod-autoscaler/FAQ.md +++ b/vertical-pod-autoscaler/FAQ.md @@ -183,6 +183,7 @@ Name | Type | Description | Default `memory-aggregation-interval-count` | Int64 | The number of consecutive memory-aggregation-intervals which make up the MemoryAggregationWindowLength which in turn is the period for memory usage aggregation by VPA. In other words, MemoryAggregationWindowLength = memory-aggregation-interval * memory-aggregation-interval-count. | model.DefaultMemoryAggregationIntervalCount `memory-histogram-decay-half-life` | Duration | The amount of time it takes a historical memory usage sample to lose half of its weight. In other words, a fresh usage sample is twice as 'important' as one with age equal to the half life period. | model.DefaultMemoryHistogramDecayHalfLife `cpu-histogram-decay-half-life` | Duration | The amount of time it takes a historical CPU usage sample to lose half of its weight. | model.DefaultCPUHistogramDecayHalfLife +`cpu-integer-post-processor-enabled` | Bool | Enable the CPU integer recommendation post processor | false ### What are the parameters to VPA updater? diff --git a/vertical-pod-autoscaler/README.md b/vertical-pod-autoscaler/README.md index 1b94ad5ab069..fc6bc707befb 100644 --- a/vertical-pod-autoscaler/README.md +++ b/vertical-pod-autoscaler/README.md @@ -19,6 +19,7 @@ - [Capping to Limit Range](#capping-to-limit-range) - [Resource Policy Overriding Limit Range](#resource-policy-overriding-limit-range) - [Starting multiple recommenders](#starting-multiple-recommenders) + - [Using CPU management with static policy](#using-cpu-management-with-static-policy) - [Known limitations](#known-limitations) - [Related links](#related-links) @@ -294,6 +295,17 @@ Please note the usage of the following arguments to override default names and p You can then choose which recommender to use by setting `recommenders` inside the `VerticalPodAutoscaler` spec. +### Using CPU management with static policy + +If you are using the [CPU management with static policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy) for some containers, +you probably want the CPU recommendation to be an integer. A dedicated recommendation pre-processor can perform a round up on the CPU recommendation. Recommendation capping still applies after the round up. +To activate this feature, pass the flag `--cpu-integer-post-processor-enabled` when you start the recommender. +The pre-processor only acts on containers having a specific configuration. This configuration consists in an annotation on your VPA object for each impacted container. 
+The annotation format is the following: +``` +vpa-post-processor.kubernetes.io/{containerName}_integerCPU=true +``` + # Known limitations * Whenever VPA updates the pod resources, the pod is recreated, which causes all From b89d2c1a89186afb9c2eeb39a29da0c3ccea3276 Mon Sep 17 00:00:00 2001 From: Junwon Kwon Date: Sat, 24 Dec 2022 00:33:56 +0900 Subject: [PATCH 06/35] fix typo in FAQ --- cluster-autoscaler/FAQ.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-autoscaler/FAQ.md b/cluster-autoscaler/FAQ.md index 853adc00688d..f7d18269a26c 100644 --- a/cluster-autoscaler/FAQ.md +++ b/cluster-autoscaler/FAQ.md @@ -869,7 +869,7 @@ This limitation was solved with introduced as beta in Kubernetes 1.11 and planned for GA in 1.13. To allow CA to take advantage of topological scheduling, use separate node groups per zone. This way CA knows exactly which node group will create nodes in the required zone rather than relying on the cloud provider choosing a zone for a new node in a multi-zone node group. -When using separate node groups per zone, the `--balance-similar-node-groups` flag will keep nodes balanced across zones for workloads that dont require topological scheduling. +When using separate node groups per zone, the `--balance-similar-node-groups` flag will keep nodes balanced across zones for workloads that don't require topological scheduling. ### CA doesn’t work, but it used to work yesterday. Why? From 10a4d82ea1acc0968f7fbd1cf28650ac535d0b91 Mon Sep 17 00:00:00 2001 From: "jesse.millan" Date: Fri, 23 Dec 2022 19:35:47 -0800 Subject: [PATCH 07/35] Uncomment myself now that I am a Kubernetes org member. --- cluster-autoscaler/cloudprovider/oci/OWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/oci/OWNERS b/cluster-autoscaler/cloudprovider/oci/OWNERS index 62122c4ebdbf..f8c5f193f32d 100644 --- a/cluster-autoscaler/cloudprovider/oci/OWNERS +++ b/cluster-autoscaler/cloudprovider/oci/OWNERS @@ -1,5 +1,5 @@ approvers: -#- jlamillan +- jlamillan reviewers: -#- jlamillan +- jlamillan #- ericrrath From 66c449cee2358f087bbfb003eb95f6b3d0fad985 Mon Sep 17 00:00:00 2001 From: Michael Grosser Date: Tue, 25 Oct 2022 14:05:59 -0700 Subject: [PATCH 08/35] aws: allow setting max retries from AWS_MAX_ATTEMPTS env var --- .../cloudprovider/aws/README.md | 1 + .../cloudprovider/aws/aws_sdk_provider.go | 28 +++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/aws/README.md b/cluster-autoscaler/cloudprovider/aws/README.md index 2c01cd227acb..2c8c034a9826 100644 --- a/cluster-autoscaler/cloudprovider/aws/README.md +++ b/cluster-autoscaler/cloudprovider/aws/README.md @@ -521,3 +521,4 @@ Please note: it is also possible to mount the cloud config file from host: EC2 launch configuration has the setting `Metadata response hop limit` set to `2`. Otherwise, the `/latest/api/token` call will timeout and result in an error. See [AWS docs here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#configuring-instance-metadata-options) for further information. - If you don't use EKS managed nodegroups, don't add the `eks:nodegroup-name` tag to the ASG as this will lead to extra EKS API calls that could slow down scaling when there are 0 nodes in the nodegroup. 
+- Set `AWS_MAX_ATTEMPTS` to configure max retries diff --git a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go index 4fa01be09868..05ba69c5514e 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go @@ -27,6 +27,7 @@ import ( "k8s.io/klog/v2" provider_aws "k8s.io/legacy-cloud-providers/aws" "os" + "strconv" "strings" ) @@ -49,8 +50,16 @@ func createAWSSDKProvider(configReader io.Reader) (*awsSDKProvider, error) { return nil, err } - sess, err := session.NewSession(aws.NewConfig().WithRegion(getRegion()). - WithEndpointResolver(getResolver(cfg))) + config := aws.NewConfig(). + WithRegion(getRegion()). + WithEndpointResolver(getResolver(cfg)) + + config, err = setMaxRetriesFromEnv(config) + if err != nil { + return nil, err + } + + sess, err := session.NewSession(config) if err != nil { return nil, err @@ -63,6 +72,21 @@ func createAWSSDKProvider(configReader io.Reader) (*awsSDKProvider, error) { return provider, nil } +// setMaxRetriesFromEnv sets aws config MaxRetries by reading AWS_MAX_ATTEMPTS +// aws sdk does not auto-set these so instead of having more config options we can reuse what the aws cli +// does and read AWS_MAX_ATTEMPTS from the env https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html +func setMaxRetriesFromEnv(config *aws.Config) (*aws.Config, error) { + maxRetries := os.Getenv("AWS_MAX_ATTEMPTS") + if maxRetries != "" { + num, err := strconv.Atoi(maxRetries) + if err != nil { + return nil, err + } + config = config.WithMaxRetries(num) + } + return config, nil +} + type awsSDKProvider struct { session *session.Session } From 62c68e1280f395b0c341946fc5016b65b7b04c01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Wr=C3=B3blewski?= Date: Tue, 27 Dec 2022 15:01:44 +0000 Subject: [PATCH 09/35] Move PredicateChecker initialization before processors initialization --- cluster-autoscaler/core/autoscaler.go | 8 -------- cluster-autoscaler/main.go | 7 +++++++ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/cluster-autoscaler/core/autoscaler.go b/cluster-autoscaler/core/autoscaler.go index 523642b1018b..f6084ec46ad1 100644 --- a/cluster-autoscaler/core/autoscaler.go +++ b/cluster-autoscaler/core/autoscaler.go @@ -90,14 +90,6 @@ func initializeDefaultOptions(opts *AutoscalerOptions) error { if opts.AutoscalingKubeClients == nil { opts.AutoscalingKubeClients = context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.EventsKubeClient) } - if opts.PredicateChecker == nil { - predicateCheckerStopChannel := make(chan struct{}) - predicateChecker, err := predicatechecker.NewSchedulerBasedPredicateChecker(opts.KubeClient, predicateCheckerStopChannel) - if err != nil { - return err - } - opts.PredicateChecker = predicateChecker - } if opts.ClusterSnapshot == nil { opts.ClusterSnapshot = clustersnapshot.NewBasicClusterSnapshot() } diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index 4b749546fcc9..22e1d9460098 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -30,6 +30,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "github.com/spf13/pflag" @@ -365,12 +366,18 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter kubeClient := createKubeClient(getKubeConfig()) eventsKubeClient := 
createKubeClient(getKubeConfig()) + predicateChecker, err := predicatechecker.NewSchedulerBasedPredicateChecker(kubeClient, make(chan struct{})) + if err != nil { + return nil, err + } + opts := core.AutoscalerOptions{ AutoscalingOptions: autoscalingOptions, ClusterSnapshot: clustersnapshot.NewDeltaClusterSnapshot(), KubeClient: kubeClient, EventsKubeClient: eventsKubeClient, DebuggingSnapshotter: debuggingSnapshotter, + PredicateChecker: predicateChecker, } opts.Processors = ca_processors.DefaultProcessors() From cd26bcfe605b579f7dec8caac7f7475d57a68bf9 Mon Sep 17 00:00:00 2001 From: Michael Grosser Date: Thu, 29 Sep 2022 15:39:05 -0700 Subject: [PATCH 10/35] allow setting kuberentes client burst and qps to avoid rate limiting --- cluster-autoscaler/config/autoscaling_options.go | 4 ++++ cluster-autoscaler/main.go | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go index f43fe5fc2c7f..5d212d6e9358 100644 --- a/cluster-autoscaler/config/autoscaling_options.go +++ b/cluster-autoscaler/config/autoscaling_options.go @@ -168,6 +168,10 @@ type AutoscalingOptions struct { ConcurrentGceRefreshes int // Path to kube configuration if available KubeConfigPath string + // Burst setting for kubernetes client + KubeClientBurst int + // QPS setting for kubernetes client + KubeClientQPS float64 // ClusterAPICloudConfigAuthoritative tells the Cluster API provider to treat the CloudConfig option as authoritative and // not use KubeConfigPath as a fallback when it is not provided. ClusterAPICloudConfigAuthoritative bool diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index 4b749546fcc9..fa6a29abe2b6 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -90,6 +90,8 @@ var ( address = flag.String("address", ":8085", "The address to expose prometheus metrics.") kubernetes = flag.String("kubernetes", "", "Kubernetes master location. Leave blank for default") kubeConfigFile = flag.String("kubeconfig", "", "Path to kubeconfig file with authorization and master location information.") + kubeClientBurst = flag.Int("kube-client-burst", rest.DefaultBurst, "Burst value for kubernetes client.") + kubeClientQPS = flag.Float64("kube-client-qps", float64(rest.DefaultQPS), "QPS value for kubernetes client.") cloudConfig = flag.String("cloud-config", "", "The path to the cloud provider configuration file. Empty string for no configuration file.") namespace = flag.String("namespace", "kube-system", "Namespace in which cluster-autoscaler run.") enforceNodeGroupMinSize = flag.Bool("enforce-node-group-min-size", false, "Should CA scale up the node group to the configured min size if needed.") @@ -290,6 +292,8 @@ func createAutoscalingOptions() config.AutoscalingOptions { BalancingExtraIgnoredLabels: *balancingIgnoreLabelsFlag, BalancingLabels: *balancingLabelsFlag, KubeConfigPath: *kubeConfigFile, + KubeClientBurst: *kubeClientBurst, + KubeClientQPS: *kubeClientQPS, NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout, AWSUseStaticInstanceList: *awsUseStaticInstanceList, ConcurrentGceRefreshes: *concurrentGceRefreshes, @@ -362,7 +366,12 @@ func registerSignalHandlers(autoscaler core.Autoscaler) { func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter) (core.Autoscaler, error) { // Create basic config from flags. 
autoscalingOptions := createAutoscalingOptions() - kubeClient := createKubeClient(getKubeConfig()) + + kubeClientConfig := getKubeConfig() + kubeClientConfig.Burst = autoscalingOptions.KubeClientBurst + kubeClientConfig.QPS = float32(autoscalingOptions.KubeClientQPS) + kubeClient := createKubeClient(kubeClientConfig) + eventsKubeClient := createKubeClient(getKubeConfig()) opts := core.AutoscalerOptions{ From b168e219ea40b4febade5d2938f39272553a1fe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Fri, 30 Dec 2022 12:52:57 +0100 Subject: [PATCH 11/35] State expectations around cloudprovider OWNERS --- cluster-autoscaler/cloudprovider/POLICY.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/POLICY.md b/cluster-autoscaler/cloudprovider/POLICY.md index d872df4d2d17..c6fff03f8778 100644 --- a/cluster-autoscaler/cloudprovider/POLICY.md +++ b/cluster-autoscaler/cloudprovider/POLICY.md @@ -74,13 +74,14 @@ in-tree cloudprovider follows the following rules: * It is required that both reviewers and approvers sections of OWNERS file are non-empty. * This can create a chicken and egg problem, where adding a cloudprovider - requires being a member of Kubernetes org and becoming a member of the - organization requires a history of code contributions. For this reason it - is allowed for the OWNERS file to temporarily contain - commented out github handles. There is an expectation that at least some of - the owners will ultimately join Kubernetes organization (by following the - [process](https://github.com/kubernetes/community/blob/master/community-membership.md)) - so that they can approve PRs to their cloudprovider. + requires being a member of Kubernetes org and becoming a member of the + organization requires a history of code contributions. For this reason it + is allowed for the OWNERS file to temporarily contain commented out github + handles. There is an expectation that at least some of the owners will + join Kubernetes organization (by following the + [process](https://github.com/kubernetes/community/blob/master/community-membership.md)) + within one release cycly, so that they can approve PRs to their + cloudprovider. * Cloudprovider shouldn't introduce new dependencies (such as clients/SDKs) to top-level go.mod vendor, unless those dependencies are already imported by kubernetes/kubernetes repository and the same version of the library is @@ -112,6 +113,8 @@ maintenance request_ (CMR) mechanism. * A CMR may be issued no later then [enhancements freeze](https://github.com/kubernetes/sig-release/blob/master/releases/release_phases.md#enhancements-freeze) of a given Kubernetes minor version. + * If a given cloud provider was added more than one release cycle ago and there + are no valid OWNERS, CMR should request OWNERS file update. Cloudprovider owners will be required to address CMR or request an exception via the CMR github issue. A failure to take any action will result in cloudprovider From 730f83885a4857600bc3cfb07b72e490db22a2dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Fri, 30 Dec 2022 13:04:42 +0100 Subject: [PATCH 12/35] Stop asking PR creators to name modified components Rely on labels instead. 
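The `kube-client-burst` and `kube-client-qps` flags introduced earlier in this series are applied to the client-go `rest.Config` before any clients are built. A minimal sketch of that wiring, assuming a hypothetical kubeconfig path and standalone flag parsing rather than the autoscaler's own option plumbing:

```
package main

import (
	"flag"
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// Illustrative flags mirroring --kube-client-burst and --kube-client-qps.
var (
	kubeClientBurst = flag.Int("kube-client-burst", rest.DefaultBurst, "Burst value for kubernetes client.")
	kubeClientQPS   = flag.Float64("kube-client-qps", float64(rest.DefaultQPS), "QPS value for kubernetes client.")
)

func main() {
	flag.Parse()
	// Hypothetical kubeconfig location; the real code derives its config from --kubeconfig or in-cluster settings.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// Raise the client-side rate limits before constructing clientsets, as the patch does for the main client.
	cfg.Burst = *kubeClientBurst
	cfg.QPS = float32(*kubeClientQPS)
	fmt.Printf("kubernetes client configured with QPS=%v burst=%d\n", cfg.QPS, cfg.Burst)
}
```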
--- .github/PULL_REQUEST_TEMPLATE.md | 6 ------ addon-resizer/OWNERS | 2 ++ vertical-pod-autoscaler/OWNERS | 2 ++ 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index fab0c798e271..0d03000772dc 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,9 +1,3 @@ -#### Which component this PR applies to? - - - #### What type of PR is this? From 643962b7307801bbd4f51eae097f41a339ce8c98 Mon Sep 17 00:00:00 2001 From: shubham82 Date: Thu, 12 Jan 2023 12:09:45 +0900 Subject: [PATCH 18/35] Added the RBAC Permission to alicloud. --- .../alicloud/examples/cluster-autoscaler-standard.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml b/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml index 344a3bd96db8..7543b14bcb8c 100644 --- a/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml +++ b/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml @@ -45,7 +45,7 @@ rules: resources: ["statefulsets", "replicasets", "daemonsets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] + resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"] verbs: ["watch","list","get"] --- From 8ca3afc35b4ca895895525f3a084571d32363a1f Mon Sep 17 00:00:00 2001 From: michael mccune Date: Mon, 19 Dec 2022 13:41:04 -0500 Subject: [PATCH 19/35] update clusterapi readme with table of contents this change will make navigating the readme easier for users. --- .../cloudprovider/clusterapi/README.md | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index fb1811f5bd69..f4135e425034 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -5,6 +5,29 @@ the [cluster-api project](https://github.com/kubernetes-sigs/cluster-api) to manage the provisioning and de-provisioning of nodes within a Kubernetes cluster. 
+## Table of Contents: + +* [Kubernetes Version](#kubernetes-version) +* [Starting the Autoscaler](#starting-the-autoscaler) +* [Configuring node group auto discovery](#configuring-node-group-auto-discovery) +* [Connecting cluster-autoscaler to Cluster API management and workload Clusters](#connecting-cluster-autoscaler-to-cluster-api-management-and-workload-clusters) + * [Autoscaler running in a joined cluster using service account credentials](#autoscaler-running-in-a-joined-cluster-using-service-account-credentials) + * [Autoscaler running in workload cluster using service account credentials, with separate management cluster](#autoscaler-running-in-workload-cluster-using-service-account-credentials-with-separate-management-cluster) + * [Autoscaler running in management cluster using service account credentials, with separate workload cluster](#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster) + * [Autoscaler running anywhere, with separate kubeconfigs for management and workload clusters](#autoscaler-running-anywhere-with-separate-kubeconfigs-for-management-and-workload-clusters) + * [Autoscaler running anywhere, with a common kubeconfig for management and workload clusters](#autoscaler-running-anywhere-with-a-common-kubeconfig-for-management-and-workload-clusters) +* [Enabling Autoscaling](#enabling-autoscaling) + * [Scale from zero support](#scale-from-zero-support) + * [RBAC changes for scaling from zero](#rbac-changes-for-scaling-from-zero) + * [Pre-defined labels and taints on nodes scaled from zero](#pre-defined-labels-and-taints-on-nodes-scaled-from-zero) +* [Specifying a Custom Resource Group](#specifying-a-custom-resource-group) +* [Specifying a Custom Resource Version](#specifying-a-custom-resource-version) +* [Sample manifest](#sample-manifest) + * [A note on permissions](#a-note-on-permissions) +* [Autoscaling with ClusterClass and Managed Topologies](#autoscaling-with-clusterclass-and-managed-topologies) +* [Special note on GPU instances](#special-note-on-gpu-instances) + + ## Kubernetes Version The cluster-api provider requires Kubernetes v1.16 or greater to run the From 6b80a7134af3e805683e8147f6e734df29fe77f0 Mon Sep 17 00:00:00 2001 From: michael mccune Date: Mon, 19 Dec 2022 14:14:46 -0500 Subject: [PATCH 20/35] add a note to clusterapi readme about ignored labels this change adds a section to the readme that provides advice for clusterapi users about which labels they might want to ignore when using the balance similar node groups flag on various cloud providers. --- .../cloudprovider/clusterapi/README.md | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index f4135e425034..56ba6a9cf4a0 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -26,6 +26,7 @@ cluster. * [A note on permissions](#a-note-on-permissions) * [Autoscaling with ClusterClass and Managed Topologies](#autoscaling-with-clusterclass-and-managed-topologies) * [Special note on GPU instances](#special-note-on-gpu-instances) +* [Special note on balancing similar node groups](#special-note-on-balancing-similar-node-groups) ## Kubernetes Version @@ -359,3 +360,60 @@ CAPI cloudprovider, the label format is as follows: `cluster-api/accelerator=` `` is arbitrary. 
+ +## Special note on balancing similar node groups + +The Cluster Autoscaler feature to enable balancing similar node groups +(activated with the `--balance-similar-node-groups` flag) is a powerful and +popular feature. When enabled, the Cluster Autoscaler will attempt to create +new nodes by adding them in a manner that balances the creation between +similar node groups. With Cluster API, these node groups correspond directly +to the scalable resources associated (usually MachineDeployments and MachineSets) +with the nodes in question. In order for the nodes of these scalable resources +to be considered similar by the Cluster Autoscaler, they must have the same +capacity, labels, and taints for the nodes which will be created from them. + +To help assist the Cluster Autoscaler in determining which node groups are +similar, the command line flags `--balancing-ignore-label` and +`--balancing-label` are provided. For an expanded discussion about balancing +similar node groups and the options which are available, please see the +[Cluster Autoscaler FAQ](../../FAQ.md). + +Because Cluster API can address many different cloud providers, it is important +to configure the balancing labels to ignore provider-specific labels which +are used for carrying zonal information on Kubernetes nodes. The Cluster +Autoscaler implementation for Cluster API does not assume any labels (aside from +the [well-known Kubernetes labels](https://kubernetes.io/docs/reference/labels-annotations-taints/)) +to be ignored when running. Users must configure their Cluster Autoscaler deployment +to ignore labels which might be different between nodes, but which do not +otherwise affect node behavior or size (for example when two MachineDeployments +are the same except for their deployment zones). The Cluster API community has +decided not to carry cloud provider specific labels in the Cluster Autoscaler +to reduce the possibility for labels to clash between providers. Additionally, +the community has agreed to promote documentation and the use of the `--balancing-ignore-label` +flag as the preferred method of deployment to reduce the extended need for +maintenance on the Cluster Autoscaler when new providers are added or updated. +For further context around this decision, please see the +[Cluster API Deep Dive into Cluster Autoscaler Node Group Balancing discussion from 2022-09-12](https://www.youtube.com/watch?v=jbhca_9oPuQ&t=5s). + +The following table shows some of the most common labels used by cloud providers +to designate regional or zonal information on Kubernetes nodes. It is shared +here as a reference for users who might be deploying on these infrastructures. 
+ +| Cloud Provider | Label to ignore | Notes | +| --- | --- | --- | +| Alibaba Cloud | `topology.diskplugin.csi.alibabacloud.com/zone` | Used by the Alibaba Cloud CSI driver as a target for persistent volume node affinity | +| AWS | `alpha.eksctl.io/instance-id` | Used by `eksctl` to identify instances | +| AWS | `alpha.eksctl.io/nodegroup-name` | Used by `eksctl` to identify node group names | +| AWS | `eks.amazonaws.com/nodegroup` | Used by EKS to identify node groups | +| AWS | `k8s.amazonaws.com/eniConfig` | Used by the AWS CNI for custom networking | +| AWS | `lifecycle` | Used by AWS as a label for spot instances | +| AWS | `topology.ebs.csi.aws.com/zone` | Used by the AWS EBS CSI driver as a target for persistent volume node affinity | +| Azure | `topology.disk.csi.azure.com/zone` | Used as the topology key by the Azure Disk CSI driver | +| Azure | `agentpool` | Legacy label used to specify to which Azure node pool a particular node belongs | +| Azure | `kubernetes.azure.com/agentpool` | Used by AKS to identify to which node pool a particular node belongs | +| GCE | `topology.gke.io/zone` | Used to specify the zone of the node | +| IBM Cloud | `ibm-cloud.kubernetes.io/worker-id` | Used by the IBM Cloud Cloud Controller Manager to identify the node | +| IBM Cloud | `vpc-block-csi-driver-labels` | Used by the IBM Cloud CSI driver as a target for persistent volume node affinity | +| IBM Cloud | `ibm-cloud.kubernetes.io/vpc-instance-id` | Used when a VPC is in use on IBM Cloud | + From 955396e857c52fd6de99202d5d994211427a30d9 Mon Sep 17 00:00:00 2001 From: michael mccune Date: Mon, 19 Dec 2022 14:21:42 -0500 Subject: [PATCH 21/35] remove clusterapi nodegroupset processor as discussed with the cluster api community[0], the nodegroupset processor is being removed from the clusterapi provider implementation in favor of instructing our community on the use of the --balancing-ignore-label flag. due to the wide variety of provider infrastructures that clusterapi can be deployed on, we would prefer to not encode all of these labels in the autoscaler itself. see the linked recording for more information. 
[0] https://www.youtube.com/watch?v=jbhca_9oPuQ --- cluster-autoscaler/main.go | 2 - .../nodegroupset/clusterapi_nodegroups.go | 47 --------- .../clusterapi_nodegroups_test.go | 98 ------------------- 3 files changed, 147 deletions(-) delete mode 100644 cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups.go delete mode 100644 cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups_test.go diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index d1562adefbcd..41f7ee9b9235 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -416,8 +416,6 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter } else if autoscalingOptions.CloudProviderName == cloudprovider.GceProviderName { nodeInfoComparatorBuilder = nodegroupset.CreateGceNodeInfoComparator opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewAnnotationNodeInfoProvider(nodeInfoCacheExpireTime) - } else if autoscalingOptions.CloudProviderName == cloudprovider.ClusterAPIProviderName { - nodeInfoComparatorBuilder = nodegroupset.CreateClusterAPINodeInfoComparator } nodeInfoComparator = nodeInfoComparatorBuilder(autoscalingOptions.BalancingExtraIgnoredLabels, autoscalingOptions.NodeGroupSetRatios) } diff --git a/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups.go deleted file mode 100644 index 64188aea7e88..000000000000 --- a/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodegroupset - -import ( - "k8s.io/autoscaler/cluster-autoscaler/config" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// CreateClusterAPINodeInfoComparator returns a comparator that checks if two nodes should be considered -// part of the same NodeGroupSet. This is true if they match usual conditions checked by IsCloudProviderNodeInfoSimilar, -// even if they have different infrastructure provider-specific labels. 
-func CreateClusterAPINodeInfoComparator(extraIgnoredLabels []string, ratioOpts config.NodeGroupDifferenceRatios) NodeInfoComparator { - capiIgnoredLabels := map[string]bool{ - "topology.ebs.csi.aws.com/zone": true, // this is a label used by the AWS EBS CSI driver as a target for Persistent Volume Node Affinity - "topology.diskplugin.csi.alibabacloud.com/zone": true, // this is a label used by the Alibaba Cloud CSI driver as a target for Persistent Volume Node Affinity - "ibm-cloud.kubernetes.io/worker-id": true, // this is a label used by the IBM Cloud Cloud Controler Manager - "vpc-block-csi-driver-labels": true, // this is a label used by the IBM Cloud CSI driver as a target for Persisten Volume Node Affinity - - } - - for k, v := range BasicIgnoredLabels { - capiIgnoredLabels[k] = v - } - - for _, k := range extraIgnoredLabels { - capiIgnoredLabels[k] = true - } - - return func(n1, n2 *schedulerframework.NodeInfo) bool { - return IsCloudProviderNodeInfoSimilar(n1, n2, capiIgnoredLabels, ratioOpts) - } -} diff --git a/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups_test.go deleted file mode 100644 index df6969223b12..000000000000 --- a/cluster-autoscaler/processors/nodegroupset/clusterapi_nodegroups_test.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodegroupset - -import ( - "testing" - - "k8s.io/autoscaler/cluster-autoscaler/config" - "k8s.io/autoscaler/cluster-autoscaler/context" - . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" -) - -func TestIsClusterAPINodeInfoSimilar(t *testing.T) { - comparator := CreateClusterAPINodeInfoComparator([]string{}, config.NodeGroupDifferenceRatios{}) - node1 := BuildTestNode("node1", 1000, 2000) - node2 := BuildTestNode("node2", 1000, 2000) - - for _, tc := range []struct { - description string - label string - value1 string - value2 string - removeOneLabel bool - }{ - { - description: "topology.ebs.csi.aws.com/zone empty value", - label: "topology.ebs.csi.aws.com/zone", - value1: "", - value2: "", - removeOneLabel: false, - }, - { - description: "topology.ebs.csi.aws.com/zone different values", - label: "topology.ebs.csi.aws.com/zone", - value1: "foo", - value2: "bar", - removeOneLabel: false, - }, - { - description: "topology.diskplugin.csi.alibabacloud.com/zone different values", - label: "topology.diskplugin.csi.alibabacloud.com/zone", - value1: "foo", - value2: "bar", - removeOneLabel: false, - }, - { - description: "ibm-cloud.kubernetes.io/worker-id different values", - label: "ibm-cloud.kubernetes.io/worker-id", - value1: "foo", - value2: "bar", - removeOneLabel: false, - }, - { - description: "vpc-block-csi-driver-labels different values", - label: "vpc-block-csi-driver-labels", - value1: "foo", - value2: "bar", - removeOneLabel: false, - }, - { - description: "topology.ebs.csi.aws.com/zone one node labeled", - label: "topology.ebs.csi.aws.com/zone", - value1: "foo", - value2: "bar", - removeOneLabel: true, - }, - } { - t.Run(tc.description, func(t *testing.T) { - node1.ObjectMeta.Labels[tc.label] = tc.value1 - node2.ObjectMeta.Labels[tc.label] = tc.value2 - if tc.removeOneLabel { - delete(node2.ObjectMeta.Labels, tc.label) - } - checkNodesSimilar(t, node1, node2, comparator, true) - }) - } -} - -func TestFindSimilarNodeGroupsClusterAPIBasic(t *testing.T) { - context := &context.AutoscalingContext{} - ni1, ni2, ni3 := buildBasicNodeGroups(context) - processor := &BalancingNodeGroupSetProcessor{Comparator: CreateClusterAPINodeInfoComparator([]string{}, config.NodeGroupDifferenceRatios{})} - basicSimilarNodeGroupsTest(t, context, processor, ni1, ni2, ni3) -} From ea845d350862cfb802126307fc3333d12388948a Mon Sep 17 00:00:00 2001 From: Benjamin Pineau Date: Mon, 16 Jan 2023 15:32:52 +0100 Subject: [PATCH 22/35] gRPC expander: allow realistic server responses, and log errors Expander requests' payloads can be rather heavy under upscale pressure, as they're compounding all candidates options and unschedulable pods that could fit each options. Expander responses are a subset of the requests' payload items. We're allowing ourself to send arbitrary payload sizes (gRPC `defaultClientMaxSendMessageSize` is `math.MaxInt32`), but we're prone to drop expander servers responses to the floor, due to the `4MiB` `defaultClientMaxReceiveMessageSize`. The arbitrary 128MiB value is meant to be huge (enough to support eg. several dozen fat 1MiB pods) but not unlimited. Let me know if you'd rather see that turned to be a command line flag, or an other value. Also logging the possible gRPC call errors, as that of great help to diagnose that kind of issues. 
--- .../expander/grpcplugin/grpc_client.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/cluster-autoscaler/expander/grpcplugin/grpc_client.go b/cluster-autoscaler/expander/grpcplugin/grpc_client.go index 7800bd270136..b31de9fa4020 100644 --- a/cluster-autoscaler/expander/grpcplugin/grpc_client.go +++ b/cluster-autoscaler/expander/grpcplugin/grpc_client.go @@ -31,7 +31,10 @@ import ( "google.golang.org/grpc/credentials" ) -const gRPCTimeout = 5 * time.Second +const ( + gRPCTimeout = 5 * time.Second + gRPCMaxRecvMsgSize = 128 << 20 +) type grpcclientstrategy struct { grpcClient protos.ExpanderClient @@ -47,8 +50,6 @@ func NewFilter(expanderCert string, expanderUrl string) expander.Filter { } func createGRPCClient(expanderCert string, expanderUrl string) protos.ExpanderClient { - var dialOpt grpc.DialOption - if expanderCert == "" { log.Fatalf("GRPC Expander Cert not specified, insecure connections not allowed") return nil @@ -58,9 +59,12 @@ func createGRPCClient(expanderCert string, expanderUrl string) protos.ExpanderCl log.Fatalf("Failed to create TLS credentials %v", err) return nil } - dialOpt = grpc.WithTransportCredentials(creds) - klog.V(2).Infof("Dialing: %s with dialopt: %v", expanderUrl, dialOpt) - conn, err := grpc.Dial(expanderUrl, dialOpt) + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(creds), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(gRPCMaxRecvMsgSize)), + } + klog.V(2).Infof("Dialing: %s with dialopt: %v", expanderUrl, dialOpts) + conn, err := grpc.Dial(expanderUrl, dialOpts...) if err != nil { log.Fatalf("Fail to dial server: %v", err) return nil @@ -84,7 +88,7 @@ func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nod defer cancel() bestOptionsResponse, err := g.grpcClient.BestOptions(ctx, &protos.BestOptionsRequest{Options: grpcOptionsSlice, NodeMap: grpcNodeMap}) if err != nil { - klog.V(4).Info("GRPC call timed out, no options filtered") + klog.V(4).Infof("GRPC call failed, no options filtered: %v", err) return expansionOptions } From ed65377cb26762fc8dd7340504a41488eed4fab4 Mon Sep 17 00:00:00 2001 From: shubham82 Date: Wed, 18 Jan 2023 16:33:11 +0900 Subject: [PATCH 23/35] Added link for RFC3986 in host.go --- .../cloudprovider/aws/aws-sdk-go/private/protocol/host.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/private/protocol/host.go b/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/private/protocol/host.go index eb7f8dc7dd75..ad3ab350452e 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/private/protocol/host.go +++ b/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/private/protocol/host.go @@ -8,7 +8,7 @@ import ( ) // ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. +// request endpoint's hosts is a valid RFC 3986 (https://www.ietf.org/rfc/rfc3986.txt) host. var ValidateEndpointHostHandler = request.NamedHandler{ Name: "awssdk.protocol.ValidateEndpointHostHandler", Fn: func(r *request.Request) { @@ -20,7 +20,7 @@ var ValidateEndpointHostHandler = request.NamedHandler{ } // ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. +// 3986 (https://www.ietf.org/rfc/rfc3986.txt) host. Returns error if the host is not valid. 
func ValidateEndpointHost(opName, host string) error { paramErrs := request.ErrInvalidParams{Context: opName} @@ -71,7 +71,7 @@ func ValidateEndpointHost(opName, host string) error { return nil } -// ValidHostLabel returns if the label is a valid RFC 3986 host label. +// ValidHostLabel returns if the label is a valid RFC 3986 (https://www.ietf.org/rfc/rfc3986.txt) host label. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false @@ -90,7 +90,7 @@ func ValidHostLabel(label string) bool { return true } -// ValidPortNumber return if the port is valid RFC 3986 port +// ValidPortNumber return if the port is valid RFC 3986 (https://www.ietf.org/rfc/rfc3986.txt) port func ValidPortNumber(port string) bool { i, err := strconv.Atoi(port) if err != nil { From 1e1615ad6318a948c0c0a7320bd8fcf924040262 Mon Sep 17 00:00:00 2001 From: michael mccune Date: Wed, 18 Jan 2023 16:57:17 -0500 Subject: [PATCH 24/35] add an extra note to clusterapi readme about gpus this change adds a little more detail to ensure that users understand how to use the GPU label feature. --- cluster-autoscaler/cloudprovider/clusterapi/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index 56ba6a9cf4a0..4dfd1c183152 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -361,6 +361,16 @@ CAPI cloudprovider, the label format is as follows: `` is arbitrary. +It is important to note that if you are using the `--gpu-total` flag to limit the number +of GPU resources in your cluster, the `` value must match +between the command line flag and the node labels. Setting these values incorrectly +can lead to the autoscaler creating too many GPU resources. + +For example, if you are using the autoscaler command line flag +`--gpu-total=gfx-hardware:1:2` to limit the number of `gfx-hardware` resources +to a minimum of 1 and maximum of 2, then you should use the kubelet node label flag +`--node-labels=cluster-api/accelerator=gfx-hardware`. + ## Special note on balancing similar node groups The Cluster Autoscaler feature to enable balancing similar node groups From 51914734ac5015306d39a597746818e5b3a7ec80 Mon Sep 17 00:00:00 2001 From: Hakan Bostan Date: Thu, 19 Jan 2023 11:31:31 +0000 Subject: [PATCH 25/35] Rephrase error messages specific for GPUs. Some error messages related to custom resources assume that the custom resource is always a GPU. Rephrasing these error messages to clear up any confusion.
--- cluster-autoscaler/core/scaledown/resource/limits.go | 4 ++-- cluster-autoscaler/core/scaleup/resource_manager.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster-autoscaler/core/scaledown/resource/limits.go b/cluster-autoscaler/core/scaledown/resource/limits.go index 625ed4f15dff..b87ef0af9d5b 100644 --- a/cluster-autoscaler/core/scaledown/resource/limits.go +++ b/cluster-autoscaler/core/scaledown/resource/limits.go @@ -128,7 +128,7 @@ func (lf *LimitsFinder) customResourcesTotal(context *context.AutoscalingContext } nodeGroup, err := context.CloudProvider.NodeGroupForNode(node) if err != nil { - return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("can not get node group for node %v when calculating cluster gpu usage", node.Name) + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("cannot get node group for node %v when calculating cluster custom resource usage", node.Name) } if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { // We do not trust cloud providers to return properly constructed nil for interface type - hence the reflection check. @@ -146,7 +146,7 @@ func (lf *LimitsFinder) customResourcesTotal(context *context.AutoscalingContext if !cacheHit { resourceTargets, err = lf.crp.GetNodeResourceTargets(context, node, nodeGroup) if err != nil { - return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("can not get gpu count for node %v when calculating cluster gpu usage") + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("cannot get custom resource count for node %v when calculating cluster custom resource usage") } if nodeGroup != nil { ngCache[nodeGroup.Id()] = resourceTargets diff --git a/cluster-autoscaler/core/scaleup/resource_manager.go b/cluster-autoscaler/core/scaleup/resource_manager.go index 568e10f10bfa..12c0710fcf82 100644 --- a/cluster-autoscaler/core/scaleup/resource_manager.go +++ b/cluster-autoscaler/core/scaleup/resource_manager.go @@ -249,7 +249,7 @@ func (m *ResourceManager) customResourcesTotal(ctx *context.AutoscalingContext, if currentSize > 0 { resourceTargets, err := m.crp.GetNodeResourceTargets(ctx, nodeInfo.Node(), nodeGroup) if err != nil { - return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("failed to get target gpu for node group %v: ", nodeGroup.Id()) + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("failed to get custom resource target for node group %v: ", nodeGroup.Id()) } for _, resourceTarget := range resourceTargets { @@ -264,7 +264,7 @@ func (m *ResourceManager) customResourcesTotal(ctx *context.AutoscalingContext, for _, node := range nodesFromNotAutoscaledGroups { resourceTargets, err := m.crp.GetNodeResourceTargets(ctx, node, nil) if err != nil { - return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("failed to get target gpu for node gpus count for node %v: ", node.Name) + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("failed to get custom resource target for node %v: ", node.Name) } for _, resourceTarget := range resourceTargets { From 97159df69b0a67e0d6754614722a7d34e67f5d39 Mon Sep 17 00:00:00 2001 From: Yaroslava Serdiuk Date: Tue, 17 Jan 2023 10:19:56 +0000 Subject: [PATCH 26/35] Add scale down candidates observer --- cluster-autoscaler/core/static_autoscaler.go | 3 +- cluster-autoscaler/core/test/common.go | 20 ++++---- cluster-autoscaler/processors/processors.go | 26 
++++++---- .../scale_down_candidates_observer.go | 51 +++++++++++++++++++ 4 files changed, 79 insertions(+), 21 deletions(-) create mode 100644 cluster-autoscaler/processors/scaledowncandidates/scale_down_candidates_observer.go diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go index afb860c5a1c4..08870a1d8573 100644 --- a/cluster-autoscaler/core/static_autoscaler.go +++ b/cluster-autoscaler/core/static_autoscaler.go @@ -163,6 +163,7 @@ func NewStaticAutoscaler( } clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder, backoff) + processors.ScaleDownCandidatesNotifier.Register(clusterStateRegistry) deleteOptions := simulator.NodeDeleteOptions{ SkipNodesWithSystemPods: opts.SkipNodesWithSystemPods, @@ -571,7 +572,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError typedErr := a.scaleDownPlanner.UpdateClusterState(podDestinations, scaleDownCandidates, actuationStatus, pdbs, currentTime) // Update clusterStateRegistry and metrics regardless of whether ScaleDown was successful or not. unneededNodes := a.scaleDownPlanner.UnneededNodes() - a.clusterStateRegistry.UpdateScaleDownCandidates(unneededNodes, currentTime) + a.processors.ScaleDownCandidatesNotifier.Update(unneededNodes, currentTime) metrics.UpdateUnneededNodesCount(len(unneededNodes)) if typedErr != nil { scaleDownStatus.Result = scaledownstatus.ScaleDownError diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go index 66869456088c..8a57ec508dea 100644 --- a/cluster-autoscaler/core/test/common.go +++ b/cluster-autoscaler/core/test/common.go @@ -45,6 +45,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfos" "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider" "k8s.io/autoscaler/cluster-autoscaler/processors/nodes" + "k8s.io/autoscaler/cluster-autoscaler/processors/scaledowncandidates" "k8s.io/autoscaler/cluster-autoscaler/processors/status" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" @@ -143,15 +144,16 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{}), ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(), // TODO(bskiba): change scale up test so that this can be a NoOpProcessor - ScaleUpStatusProcessor: &status.EventingScaleUpStatusProcessor{}, - ScaleDownStatusProcessor: &status.NoOpScaleDownStatusProcessor{}, - AutoscalingStatusProcessor: &status.NoOpAutoscalingStatusProcessor{}, - NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), - NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(), - TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil), - NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(), - CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(), - ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(), + ScaleUpStatusProcessor: &status.EventingScaleUpStatusProcessor{}, + ScaleDownStatusProcessor: &status.NoOpScaleDownStatusProcessor{}, + AutoscalingStatusProcessor: &status.NoOpAutoscalingStatusProcessor{}, + NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), + NodeInfoProcessor: 
nodeinfos.NewDefaultNodeInfoProcessor(), + TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil), + NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(), + CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(), + ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(), + ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(), } } diff --git a/cluster-autoscaler/processors/processors.go b/cluster-autoscaler/processors/processors.go index 79ba3456c11a..b6a396098d52 100644 --- a/cluster-autoscaler/processors/processors.go +++ b/cluster-autoscaler/processors/processors.go @@ -27,6 +27,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider" "k8s.io/autoscaler/cluster-autoscaler/processors/nodes" "k8s.io/autoscaler/cluster-autoscaler/processors/pods" + "k8s.io/autoscaler/cluster-autoscaler/processors/scaledowncandidates" "k8s.io/autoscaler/cluster-autoscaler/processors/status" ) @@ -61,6 +62,8 @@ type AutoscalingProcessors struct { CustomResourcesProcessor customresources.CustomResourcesProcessor // ActionableClusterProcessor is interface defining whether the cluster is in an actionable state ActionableClusterProcessor actionablecluster.ActionableClusterProcessor + // ScaleDownCandidatesNotifier is used to Update and Register new scale down candidates observer. + ScaleDownCandidatesNotifier *scaledowncandidates.ObserversList } // DefaultProcessors returns default set of processors. @@ -73,17 +76,18 @@ func DefaultProcessors() *AutoscalingProcessors { MaxCapacityMemoryDifferenceRatio: config.DefaultMaxCapacityMemoryDifferenceRatio, MaxFreeDifferenceRatio: config.DefaultMaxFreeDifferenceRatio, }), - ScaleUpStatusProcessor: status.NewDefaultScaleUpStatusProcessor(), - ScaleDownNodeProcessor: nodes.NewPreFilteringScaleDownNodeProcessor(), - ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(), - ScaleDownStatusProcessor: status.NewDefaultScaleDownStatusProcessor(), - AutoscalingStatusProcessor: status.NewDefaultAutoscalingStatusProcessor(), - NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), - NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(), - NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(), - CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(), - ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(), - TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil), + ScaleUpStatusProcessor: status.NewDefaultScaleUpStatusProcessor(), + ScaleDownNodeProcessor: nodes.NewPreFilteringScaleDownNodeProcessor(), + ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(), + ScaleDownStatusProcessor: status.NewDefaultScaleDownStatusProcessor(), + AutoscalingStatusProcessor: status.NewDefaultAutoscalingStatusProcessor(), + NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), + NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(), + NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(), + CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(), + ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(), + TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil), + ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(), } } diff --git 
a/cluster-autoscaler/processors/scaledowncandidates/scale_down_candidates_observer.go b/cluster-autoscaler/processors/scaledowncandidates/scale_down_candidates_observer.go new file mode 100644 index 000000000000..34f14a8797db --- /dev/null +++ b/cluster-autoscaler/processors/scaledowncandidates/scale_down_candidates_observer.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scaledowncandidates + +import ( + "time" + + apiv1 "k8s.io/api/core/v1" +) + +// Observer is an observer of scale down candidates +type Observer interface { + // UpdateScaleDownCandidates updates scale down candidates. + UpdateScaleDownCandidates([]*apiv1.Node, time.Time) +} + +// ObserversList is a slice of observers of scale down candidates +type ObserversList struct { + observers []Observer +} + +// Register adds new observer to the list. +func (l *ObserversList) Register(o Observer) { + l.observers = append(l.observers, o) +} + +// Update updates scale down candidates for each observer. +func (l *ObserversList) Update(nodes []*apiv1.Node, now time.Time) { + for _, observer := range l.observers { + observer.UpdateScaleDownCandidates(nodes, now) + } +} + +// NewObserversList return empty list of observers. +func NewObserversList() *ObserversList { + return &ObserversList{} +} From 541ce04e4b862b2714f2366e4e1298a94b97cfbe Mon Sep 17 00:00:00 2001 From: Yaroslava Serdiuk Date: Tue, 17 Jan 2023 17:25:42 +0000 Subject: [PATCH 27/35] Add previous scale down candidate sorting --- cluster-autoscaler/main.go | 6 ++ .../nodes/previous_candidates_sorting.go | 54 ++++++++++++++ .../nodes/previous_candidates_sorting_test.go | 73 +++++++++++++++++++ ...scale_down_candidates_sorting_processor.go | 68 +++++++++++++++++ 4 files changed, 201 insertions(+) create mode 100644 cluster-autoscaler/processors/nodes/previous_candidates_sorting.go create mode 100644 cluster-autoscaler/processors/nodes/previous_candidates_sorting_test.go create mode 100644 cluster-autoscaler/processors/nodes/scale_down_candidates_sorting_processor.go diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index d1562adefbcd..bd5a26dc9fc5 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -49,6 +49,7 @@ import ( ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodes" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" @@ -403,6 +404,11 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter podlistprocessor.NewCurrentlyDrainedNodesPodListProcessor(), podlistprocessor.NewFilterOutSchedulablePodListProcessor(opts.PredicateChecker), ) + if autoscalingOptions.ParallelDrain { + sdProcessor := 
nodes.NewScaleDownCandidatesSortingProcessor() + opts.Processors.ScaleDownNodeProcessor = sdProcessor + opts.Processors.ScaleDownCandidatesNotifier.Register(sdProcessor) + } var nodeInfoComparator nodegroupset.NodeInfoComparator if len(autoscalingOptions.BalancingLabels) > 0 { diff --git a/cluster-autoscaler/processors/nodes/previous_candidates_sorting.go b/cluster-autoscaler/processors/nodes/previous_candidates_sorting.go new file mode 100644 index 000000000000..80ff6e8f4fe9 --- /dev/null +++ b/cluster-autoscaler/processors/nodes/previous_candidates_sorting.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodes + +import ( + "time" + + apiv1 "k8s.io/api/core/v1" +) + +// PreviousCandidates is a struct that store scale down candidates from previous loop. +type PreviousCandidates struct { + candidates map[string]bool +} + +// NewPreviousCandidates return empty PreviousCandidates struct. +func NewPreviousCandidates() *PreviousCandidates { + return &PreviousCandidates{} +} + +// UpdateScaleDownCandidates updates scale down candidates. +func (p *PreviousCandidates) UpdateScaleDownCandidates(nodes []*apiv1.Node, now time.Time) { + result := make(map[string]bool) + for _, node := range nodes { + result[node.Name] = true + } + p.candidates = result +} + +// ScaleDownEarlierThan return true if node1 is in candidate list and node2 isn't. +func (p *PreviousCandidates) ScaleDownEarlierThan(node1, node2 *apiv1.Node) bool { + if p.isPreviousCandidate(node1) && !(p.isPreviousCandidate(node2)) { + return true + } + return false +} + +func (p *PreviousCandidates) isPreviousCandidate(node *apiv1.Node) bool { + return p.candidates[node.Name] +} diff --git a/cluster-autoscaler/processors/nodes/previous_candidates_sorting_test.go b/cluster-autoscaler/processors/nodes/previous_candidates_sorting_test.go new file mode 100644 index 000000000000..67c2ef4f5ae6 --- /dev/null +++ b/cluster-autoscaler/processors/nodes/previous_candidates_sorting_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodes + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" +) + +func TestScaleDownEarlierThan(t *testing.T) { + candidate1 := BuildTestNode("candidate1", 100, 0) + candidate2 := BuildTestNode("candidate2", 100, 0) + nonCandidate1 := BuildTestNode("non-candidate1", 100, 0) + nonCandidate2 := BuildTestNode("non-candidate2", 100, 0) + + p := NewPreviousCandidates() + p.UpdateScaleDownCandidates([]*apiv1.Node{candidate1, candidate2}, time.Now()) + testCases := []struct { + name string + node1 *apiv1.Node + node2 *apiv1.Node + want bool + }{ + { + name: "Compare two candidates", + node1: candidate1, + node2: candidate2, + want: false, + }, + { + name: "Compare two non-candidates", + node1: nonCandidate1, + node2: nonCandidate2, + want: false, + }, + { + name: "Compare candidate and non-candidate", + node1: candidate1, + node2: nonCandidate2, + want: true, + }, + { + name: "Compare non-candidate and candidate", + node1: nonCandidate1, + node2: candidate2, + want: false, + }, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + got := p.ScaleDownEarlierThan(test.node1, test.node2) + assert.Equal(t, got, test.want) + }) + } +} diff --git a/cluster-autoscaler/processors/nodes/scale_down_candidates_sorting_processor.go b/cluster-autoscaler/processors/nodes/scale_down_candidates_sorting_processor.go new file mode 100644 index 000000000000..e5b07b6fa780 --- /dev/null +++ b/cluster-autoscaler/processors/nodes/scale_down_candidates_sorting_processor.go @@ -0,0 +1,68 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodes + +import ( + "sort" + "time" + + apiv1 "k8s.io/api/core/v1" + + "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" +) + +// ScaleDownCandidatesSortingProcessor is a wrapper for preFilteringProcessor that takes into account previous +// scale down candidates. This is necessary for efficient parallel scale down. +type ScaleDownCandidatesSortingProcessor struct { + preFilter *PreFilteringScaleDownNodeProcessor + previousCandidates *PreviousCandidates +} + +// GetPodDestinationCandidates returns nodes that potentially could act as destinations for pods +// that would become unscheduled after a scale down. +func (p *ScaleDownCandidatesSortingProcessor) GetPodDestinationCandidates(ctx *context.AutoscalingContext, + nodes []*apiv1.Node) ([]*apiv1.Node, errors.AutoscalerError) { + return p.preFilter.GetPodDestinationCandidates(ctx, nodes) +} + +// GetScaleDownCandidates returns filter nodes and move previous scale down candidates to the beginning of the list. 
+func (p *ScaleDownCandidatesSortingProcessor) GetScaleDownCandidates(ctx *context.AutoscalingContext, + nodes []*apiv1.Node) ([]*apiv1.Node, errors.AutoscalerError) { + candidates, err := p.preFilter.GetScaleDownCandidates(ctx, nodes) + if err != nil { + return candidates, err + } + sort.Slice(candidates, func(i, j int) bool { + return p.previousCandidates.ScaleDownEarlierThan(candidates[i], candidates[j]) + }) + return candidates, nil +} + +// CleanUp is called at CA termination. +func (p *ScaleDownCandidatesSortingProcessor) CleanUp() { +} + +// NewScaleDownCandidatesSortingProcessor returns a new PreFilteringScaleDownNodeProcessor. +func NewScaleDownCandidatesSortingProcessor() *ScaleDownCandidatesSortingProcessor { + return &ScaleDownCandidatesSortingProcessor{preFilter: NewPreFilteringScaleDownNodeProcessor(), previousCandidates: NewPreviousCandidates()} +} + +// UpdateScaleDownCandidates updates scale down candidates. +func (p *ScaleDownCandidatesSortingProcessor) UpdateScaleDownCandidates(nodes []*apiv1.Node, now time.Time) { + p.previousCandidates.UpdateScaleDownCandidates(nodes, now) +} From f1a78ad277848018e8e4b034c3b7d52baa2f5c8f Mon Sep 17 00:00:00 2001 From: "janis.orlovs" Date: Fri, 20 Jan 2023 12:20:51 +0200 Subject: [PATCH 28/35] OCI OKE autoscaller requires different set of permissions. Without this autoscaller is not working --- .../oci-ip-cluster-autoscaler-w-config.yaml | 21 +++++++++---------- ...ci-ip-cluster-autoscaler-w-principals.yaml | 21 +++++++++---------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml index b4fe234a62f4..a2c35b6a7fcc 100644 --- a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml +++ b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml @@ -15,6 +15,9 @@ metadata: k8s-addon: cluster-autoscaler.addons.k8s.io k8s-app: cluster-autoscaler rules: + - apiGroups: ["storage.k8s.io"] + resources: ["csidriver", "csistoragecapacities"] + verbs: ["watch", "list"] - apiGroups: [""] resources: ["events", "endpoints"] verbs: ["create", "patch"] @@ -24,16 +27,16 @@ rules: - apiGroups: [""] resources: ["pods/status"] verbs: ["update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["endpoints"] resourceNames: ["cluster-autoscaler"] verbs: ["get", "update"] - apiGroups: [""] resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: ["namepaces"] - verbs: ["list"] + verbs: ["watch", "list", "get", "patch", "update"] - apiGroups: [""] resources: - "pods" @@ -53,10 +56,10 @@ rules: verbs: ["watch", "list", "get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["batch"] - resources: ["jobs", "cronjobs"] verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["create"] @@ -64,10 +67,6 @@ rules: resourceNames: ["cluster-autoscaler"] resources: ["leases"] verbs: ["get", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["csidrivers", "csistoragecapacities"] - verbs: ["get", "list"] - --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git 
a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml index bf5456179a48..fa4a8d4700cd 100644 --- a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml +++ b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml @@ -15,6 +15,9 @@ metadata: k8s-addon: cluster-autoscaler.addons.k8s.io k8s-app: cluster-autoscaler rules: + - apiGroups: ["storage.k8s.io"] + resources: ["csidriver", "csistoragecapacities"] + verbs: ["watch", "list"] - apiGroups: [""] resources: ["events", "endpoints"] verbs: ["create", "patch"] @@ -24,16 +27,16 @@ rules: - apiGroups: [""] resources: ["pods/status"] verbs: ["update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["endpoints"] resourceNames: ["cluster-autoscaler"] verbs: ["get", "update"] - apiGroups: [""] resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["list"] + verbs: ["watch", "list", "get", "patch", "update"] - apiGroups: [""] resources: - "pods" @@ -53,10 +56,10 @@ rules: verbs: ["watch", "list", "get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["batch"] - resources: ["jobs", "cronjobs"] verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["create"] @@ -64,10 +67,6 @@ rules: resourceNames: ["cluster-autoscaler"] resources: ["leases"] verbs: ["get", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["csidrivers", "csistoragecapacities"] - verbs: ["get", "list"] - --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role From 0be995deb5071097a4d7b846e9ee9746821b6e87 Mon Sep 17 00:00:00 2001 From: Yaroslava Serdiuk Date: Thu, 19 Jan 2023 14:25:41 +0000 Subject: [PATCH 29/35] Add os parameters to MigOsInfo interface --- .../cloudprovider/gce/gce_manager.go | 6 ++- .../cloudprovider/gce/gce_reserved.go | 36 ++++++++++++- .../cloudprovider/gce/gce_reserved_test.go | 3 +- .../cloudprovider/gce/os_reserved.go | 14 ++++- .../cloudprovider/gce/templates.go | 53 +++++++++++-------- .../cloudprovider/gce/templates_test.go | 43 +++++++-------- 6 files changed, 105 insertions(+), 50 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager.go b/cluster-autoscaler/cloudprovider/gce/gce_manager.go index 44d9f1301f63..da5d85122225 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_manager.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_manager.go @@ -589,7 +589,11 @@ func (m *gceManagerImpl) GetMigTemplateNode(mig Mig) (*apiv1.Node, error) { if err != nil { return nil, err } - return m.templates.BuildNodeFromTemplate(mig, template, machineType.CPU, machineType.Memory, nil, m.reserved) + migOsInfo, err := m.templates.MigOsInfo(mig.Id(), template) + if err != nil { + return nil, err + } + return m.templates.BuildNodeFromTemplate(mig, migOsInfo, template, machineType.CPU, machineType.Memory, nil, m.reserved) } // parseMIGAutoDiscoverySpecs returns any provided NodeGroupAutoDiscoverySpecs diff --git a/cluster-autoscaler/cloudprovider/gce/gce_reserved.go b/cluster-autoscaler/cloudprovider/gce/gce_reserved.go index 30f2741d4c55..19af3ba46b9e 100644 --- 
a/cluster-autoscaler/cloudprovider/gce/gce_reserved.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_reserved.go @@ -87,7 +87,10 @@ type GceReserved struct{} // CalculateKernelReserved computes how much memory Linux kernel will reserve. // TODO(jkaniuk): account for crashkernel reservation on RHEL / CentOS -func (r *GceReserved) CalculateKernelReserved(physicalMemory int64, os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture, nodeVersion string) int64 { +func (r *GceReserved) CalculateKernelReserved(m MigOsInfo, physicalMemory int64, nodeVersion string) int64 { + os := m.Os() + osDistribution := m.OsDistribution() + arch := m.Arch() switch os { case OperatingSystemLinux: // Account for memory reserved by kernel @@ -267,7 +270,9 @@ func EphemeralStorageOnLocalSSDFilesystemOverheadInBytes(diskCount int64, osDist } // CalculateOSReservedEphemeralStorage estimates how much ephemeral storage OS will reserve and eviction threshold -func (r *GceReserved) CalculateOSReservedEphemeralStorage(diskSize int64, os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture, nodeVersion string) int64 { +func (r *GceReserved) CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64, nodeVersion string) int64 { + osDistribution := m.OsDistribution() + arch := m.Arch() switch osDistribution { case OperatingSystemDistributionCOS: storage := int64(math.Ceil(0.015635*float64(diskSize))) + int64(math.Ceil(4.148*GiB)) // os partition estimation @@ -289,3 +294,30 @@ func (r *GceReserved) CalculateOSReservedEphemeralStorage(diskSize int64, os Ope return 0 } } + +// GceMigOsInfo contains os details of nodes in gce mig. +type GceMigOsInfo struct { + os OperatingSystem + osDistribution OperatingSystemDistribution + arch SystemArchitecture +} + +// Os return operating system. +func (m *GceMigOsInfo) Os() OperatingSystem { + return m.os +} + +// OsDistribution return operating system distribution. +func (m *GceMigOsInfo) OsDistribution() OperatingSystemDistribution { + return m.osDistribution +} + +// Arch return system architecture. +func (m *GceMigOsInfo) Arch() SystemArchitecture { + return m.arch +} + +// NewMigOsInfo return gce implementation of MigOsInfo interface. 
+func NewMigOsInfo(os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture) MigOsInfo { + return &GceMigOsInfo{os, osDistribution, arch} +} diff --git a/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go b/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go index 862ff8bf0ebb..bd1407a688c0 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go @@ -108,7 +108,8 @@ func TestCalculateKernelReservedLinux(t *testing.T) { for idx, tc := range testCases { r := &GceReserved{} t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { - reserved := r.CalculateKernelReserved(tc.physicalMemory, OperatingSystemLinux, tc.osDistribution, tc.arch, "") + m := NewMigOsInfo(OperatingSystemLinux, tc.osDistribution, tc.arch) + reserved := r.CalculateKernelReserved(m, tc.physicalMemory, "") if tc.osDistribution == OperatingSystemDistributionUbuntu { assert.Equal(t, tc.reservedMemory+int64(math.Min(correctionConstant*float64(tc.physicalMemory), maximumCorrectionValue)+ubuntuSpecificOffset), reserved) } else if tc.osDistribution == OperatingSystemDistributionCOS { diff --git a/cluster-autoscaler/cloudprovider/gce/os_reserved.go b/cluster-autoscaler/cloudprovider/gce/os_reserved.go index 245555f8c800..ff47d826c058 100644 --- a/cluster-autoscaler/cloudprovider/gce/os_reserved.go +++ b/cluster-autoscaler/cloudprovider/gce/os_reserved.go @@ -16,13 +16,23 @@ limitations under the License. package gce +// MigOsInfo store os parameters. +type MigOsInfo interface { + // Os return operating system. + Os() OperatingSystem + // OsDistribution return operating system distribution. + OsDistribution() OperatingSystemDistribution + // Arch return system architecture. + Arch() SystemArchitecture +} + // OsReservedCalculator calculates the OS reserved values. type OsReservedCalculator interface { // CalculateKernelReserved computes how much memory OS kernel will reserve. // NodeVersion parameter is optional. If empty string is passed a result calculated using default node version will be returned. - CalculateKernelReserved(physicalMemory int64, os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture, nodeVersion string) int64 + CalculateKernelReserved(m MigOsInfo, physicalMemory int64, nodeVersion string) int64 // CalculateOSReservedEphemeralStorage estimates how much ephemeral storage OS will reserve and eviction threshold. // NodeVersion parameter is optional. If empty string is passed a result calculated using default node version will be returned. - CalculateOSReservedEphemeralStorage(diskSize int64, os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture, nodeVersion string) int64 + CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64, nodeVersion string) int64 } diff --git a/cluster-autoscaler/cloudprovider/gce/templates.go b/cluster-autoscaler/cloudprovider/gce/templates.go index d54cc09101a7..6c6c567e3396 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates.go +++ b/cluster-autoscaler/cloudprovider/gce/templates.go @@ -71,7 +71,7 @@ func (t *GceTemplateBuilder) getAcceleratorCount(accelerators []*gce.Accelerator } // BuildCapacity builds a list of resource capacities given list of hardware. 
-func (t *GceTemplateBuilder) BuildCapacity(cpu int64, mem int64, accelerators []*gce.AcceleratorConfig, os OperatingSystem, osDistribution OperatingSystemDistribution, arch SystemArchitecture, +func (t *GceTemplateBuilder) BuildCapacity(m MigOsInfo, cpu int64, mem int64, accelerators []*gce.AcceleratorConfig, ephemeralStorage int64, ephemeralStorageLocalSSDCount int64, pods *int64, version string, r OsReservedCalculator, extendedResources apiv1.ResourceList) (apiv1.ResourceList, error) { capacity := apiv1.ResourceList{} if pods == nil { @@ -81,7 +81,7 @@ func (t *GceTemplateBuilder) BuildCapacity(cpu int64, mem int64, accelerators [] } capacity[apiv1.ResourceCPU] = *resource.NewQuantity(cpu, resource.DecimalSI) - memTotal := mem - r.CalculateKernelReserved(mem, os, osDistribution, arch, version) + memTotal := mem - r.CalculateKernelReserved(m, mem, version) capacity[apiv1.ResourceMemory] = *resource.NewQuantity(memTotal, resource.DecimalSI) if accelerators != nil && len(accelerators) > 0 { @@ -91,9 +91,9 @@ func (t *GceTemplateBuilder) BuildCapacity(cpu int64, mem int64, accelerators [] if ephemeralStorage > 0 { var storageTotal int64 if ephemeralStorageLocalSSDCount > 0 { - storageTotal = ephemeralStorage - EphemeralStorageOnLocalSSDFilesystemOverheadInBytes(ephemeralStorageLocalSSDCount, osDistribution) + storageTotal = ephemeralStorage - EphemeralStorageOnLocalSSDFilesystemOverheadInBytes(ephemeralStorageLocalSSDCount, m.OsDistribution()) } else { - storageTotal = ephemeralStorage - r.CalculateOSReservedEphemeralStorage(ephemeralStorage, os, osDistribution, arch, version) + storageTotal = ephemeralStorage - r.CalculateOSReservedEphemeralStorage(m, ephemeralStorage, version) } capacity[apiv1.ResourceEphemeralStorage] = *resource.NewQuantity(int64(math.Max(float64(storageTotal), 0)), resource.DecimalSI) } @@ -160,8 +160,31 @@ func getKubeEnvValueFromTemplateMetadata(template *gce.InstanceTemplate) (string return "", nil } +// MigOsInfo return os detailes information that stored in template. +func (t *GceTemplateBuilder) MigOsInfo(migId string, template *gce.InstanceTemplate) (MigOsInfo, error) { + kubeEnvValue, err := getKubeEnvValueFromTemplateMetadata(template) + if err != nil { + return nil, fmt.Errorf("could not obtain kube-env from template metadata; %v", err) + } + os := extractOperatingSystemFromKubeEnv(kubeEnvValue) + if os == OperatingSystemUnknown { + return nil, fmt.Errorf("could not obtain os from kube-env from template metadata") + } + + osDistribution := extractOperatingSystemDistributionFromKubeEnv(kubeEnvValue) + if osDistribution == OperatingSystemDistributionUnknown { + return nil, fmt.Errorf("could not obtain os-distribution from kube-env from template metadata") + } + arch, err := extractSystemArchitectureFromKubeEnv(kubeEnvValue) + if err != nil { + arch = DefaultArch + klog.Errorf("Couldn't extract architecture from kube-env for MIG %q, falling back to %q. Error: %v", migId, arch, err) + } + return NewMigOsInfo(os, osDistribution, arch), nil +} + // BuildNodeFromTemplate builds node from provided GCE template. 
-func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.InstanceTemplate, cpu int64, mem int64, pods *int64, reserved OsReservedCalculator) (*apiv1.Node, error) { +func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, migOsInfo MigOsInfo, template *gce.InstanceTemplate, cpu int64, mem int64, pods *int64, reserved OsReservedCalculator) (*apiv1.Node, error) { if template.Properties == nil { return nil, fmt.Errorf("instance template %s has no properties", template.Name) @@ -181,22 +204,6 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.Instan Labels: map[string]string{}, } - // This call is safe even if kubeEnvValue is empty - os := extractOperatingSystemFromKubeEnv(kubeEnvValue) - if os == OperatingSystemUnknown { - return nil, fmt.Errorf("could not obtain os from kube-env from template metadata") - } - - osDistribution := extractOperatingSystemDistributionFromKubeEnv(kubeEnvValue) - if osDistribution == OperatingSystemDistributionUnknown { - return nil, fmt.Errorf("could not obtain os-distribution from kube-env from template metadata") - } - arch, err := extractSystemArchitectureFromKubeEnv(kubeEnvValue) - if err != nil { - arch = DefaultArch - klog.Errorf("Couldn't extract architecture from kube-env for MIG %q, falling back to %q. Error: %v", mig.Id(), arch, err) - } - addBootDiskAnnotations(&node, template.Properties) var ephemeralStorage int64 = -1 if !isBootDiskEphemeralStorageWithInstanceTemplateDisabled(kubeEnvValue) { @@ -225,7 +232,7 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.Instan klog.Errorf("could not fetch extended resources from instance template: %v", err) } - capacity, err := t.BuildCapacity(cpu, mem, template.Properties.GuestAccelerators, os, osDistribution, arch, ephemeralStorage, ephemeralStorageLocalSsdCount, pods, mig.Version(), reserved, extendedResources) + capacity, err := t.BuildCapacity(migOsInfo, cpu, mem, template.Properties.GuestAccelerators, ephemeralStorage, ephemeralStorageLocalSsdCount, pods, mig.Version(), reserved, extendedResources) if err != nil { return nil, err } @@ -269,7 +276,7 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.Instan node.Status.Allocatable = nodeAllocatable } // GenericLabels - labels, err := BuildGenericLabels(mig.GceRef(), template.Properties.MachineType, nodeName, os, arch) + labels, err := BuildGenericLabels(mig.GceRef(), template.Properties.MachineType, nodeName, migOsInfo.Os(), migOsInfo.Arch()) if err != nil { return nil, err } diff --git a/cluster-autoscaler/cloudprovider/gce/templates_test.go b/cluster-autoscaler/cloudprovider/gce/templates_test.go index d1abd4903a96..665dfa7e2155 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates_test.go +++ b/cluster-autoscaler/cloudprovider/gce/templates_test.go @@ -65,7 +65,8 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { ephemeralStorageLocalSSDCount int64 extendedResources apiv1.ResourceList // test outputs - expectedErr bool + expectedMigInfoErr bool + expectedNodeTemplateErr bool } testCases := []testCase{ { @@ -86,7 +87,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedCpu: "1000m", reservedMemory: fmt.Sprintf("%v", 1*units.MiB), reservedEphemeralStorage: "30Gi", - expectedErr: false, }, { scenario: "no kube-reserved in kube-env", @@ -97,18 +97,16 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { physicalCpu: 8, physicalMemory: 200 * units.MiB, kubeReserved: false, - expectedErr: false, }, { scenario: "no 
kube-env at all", kubeEnv: "", physicalCpu: 8, physicalMemory: 200 * units.MiB, kubeReserved: false, - expectedErr: false, }, { - scenario: "totally messed up kube-env", - kubeEnv: "This kube-env is totally messed up", - expectedErr: true, + scenario: "totally messed up kube-env", + kubeEnv: "This kube-env is totally messed up", + expectedMigInfoErr: true, }, { scenario: "max pods per node specified", kubeEnv: "", @@ -116,7 +114,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { physicalMemory: 200 * units.MiB, pods: &thirtyPodsPerNode, kubeReserved: false, - expectedErr: false, }, { scenario: "BLOCK_EPH_STORAGE_BOOT_DISK in kube-env", @@ -133,7 +130,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedEphemeralStorage: "0Gi", kubeReserved: true, isEphemeralStorageBlocked: true, - expectedErr: false, }, { scenario: "BLOCK_EPH_STORAGE_BOOT_DISK is false in kube-env", @@ -146,14 +142,13 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedMemory: fmt.Sprintf("%v", 0*units.MiB), reservedEphemeralStorage: "0Gi", kubeReserved: true, - expectedErr: false, }, { scenario: "more local SSDs requested for ephemeral storage than attached", kubeEnv: "AUTOSCALER_ENV_VARS: os_distribution=cos;os=linux;ephemeral_storage_local_ssd_count=1\n", ephemeralStorageLocalSSDCount: 1, attachedLocalSSDCount: 0, - expectedErr: true, + expectedNodeTemplateErr: true, }, { scenario: "all attached local SSDs requested for ephemeral storage", @@ -163,7 +158,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { bootDiskSizeGiB: 300, ephemeralStorageLocalSSDCount: 2, attachedLocalSSDCount: 2, - expectedErr: false, }, { scenario: "more local SSDs attached than requested for ephemeral storage", @@ -172,7 +166,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { physicalMemory: 200 * units.MiB, ephemeralStorageLocalSSDCount: 2, attachedLocalSSDCount: 4, - expectedErr: false, }, { scenario: "ephemeral storage on local SSDs with kube-reserved", @@ -185,7 +178,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedMemory: fmt.Sprintf("%v", 0*units.MiB), reservedEphemeralStorage: "10Gi", attachedLocalSSDCount: 4, - expectedErr: false, }, { scenario: "extended_resources present in kube-env", @@ -198,7 +190,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedMemory: fmt.Sprintf("%v", 0*units.MiB), reservedEphemeralStorage: "10Gi", attachedLocalSSDCount: 4, - expectedErr: false, extendedResources: apiv1.ResourceList{ apiv1.ResourceName("someResource"): *resource.NewQuantity(2, resource.DecimalSI), apiv1.ResourceName("anotherResource"): *resource.NewQuantity(1*units.GB, resource.DecimalSI), @@ -215,7 +206,6 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { reservedMemory: fmt.Sprintf("%v", 0*units.MiB), reservedEphemeralStorage: "10Gi", attachedLocalSSDCount: 4, - expectedErr: false, extendedResources: apiv1.ResourceList{}, }, } @@ -256,8 +246,13 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { if tc.kubeEnv != "" { template.Properties.Metadata.Items = []*gce.MetadataItems{{Key: "kube-env", Value: &tc.kubeEnv}} } - node, err := tb.BuildNodeFromTemplate(mig, template, tc.physicalCpu, tc.physicalMemory, tc.pods, &GceReserved{}) - if tc.expectedErr { + migOsInfo, err := tb.MigOsInfo(mig.Id(), template) + if tc.expectedMigInfoErr { + assert.Error(t, err) + return + } + node, err := tb.BuildNodeFromTemplate(mig, migOsInfo, template, tc.physicalCpu, tc.physicalMemory, tc.pods, &GceReserved{}) + if 
tc.expectedNodeTemplateErr { assert.Error(t, err) } else { assert.NoError(t, err) @@ -286,7 +281,8 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { } else if tc.isEphemeralStorageBlocked { physicalEphemeralStorageGiB = 0 } - capacity, err := tb.BuildCapacity(tc.physicalCpu, tc.physicalMemory, tc.accelerators, OperatingSystemLinux, OperatingSystemDistributionCOS, "", physicalEphemeralStorageGiB*units.GiB, tc.ephemeralStorageLocalSSDCount, tc.pods, "", &GceReserved{}, tc.extendedResources) + migOsInfo := NewMigOsInfo(OperatingSystemLinux, OperatingSystemDistributionCOS, "") + capacity, err := tb.BuildCapacity(migOsInfo, tc.physicalCpu, tc.physicalMemory, tc.accelerators, physicalEphemeralStorageGiB*units.GiB, tc.ephemeralStorageLocalSSDCount, tc.pods, "", &GceReserved{}, tc.extendedResources) assert.NoError(t, err) assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) if !tc.kubeReserved { @@ -593,7 +589,8 @@ func TestBuildCapacityMemory(t *testing.T) { t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { tb := GceTemplateBuilder{} noAccelerators := make([]*gce.AcceleratorConfig, 0) - buildCapacity, err := tb.BuildCapacity(tc.physicalCpu, tc.physicalMemory, noAccelerators, tc.os, OperatingSystemDistributionCOS, "", -1, 0, nil, "", &GceReserved{}, apiv1.ResourceList{}) + migOsInfo := NewMigOsInfo(tc.os, OperatingSystemDistributionCOS, "") + buildCapacity, err := tb.BuildCapacity(migOsInfo, tc.physicalCpu, tc.physicalMemory, noAccelerators, -1, 0, nil, "", &GceReserved{}, apiv1.ResourceList{}) assert.NoError(t, err) expectedCapacity, err := makeResourceList2(tc.physicalCpu, tc.expectedCapacityMemory, 0, 110) assert.NoError(t, err) @@ -1397,7 +1394,11 @@ func TestBuildNodeFromTemplateArch(t *testing.T) { }, } tb := &GceTemplateBuilder{} - gotNode, gotErr := tb.BuildNodeFromTemplate(mig, template, 16, 128, nil, &GceReserved{}) + migOsInfo, gotErr := tb.MigOsInfo(mig.Id(), template) + if gotErr != nil { + t.Fatalf("MigOsInfo unexpected error: %v", gotErr) + } + gotNode, gotErr := tb.BuildNodeFromTemplate(mig, migOsInfo, template, 16, 128, nil, &GceReserved{}) if gotErr != nil { t.Fatalf("BuildNodeFromTemplate unexpected error: %v", gotErr) } From 0a2d05b6922f0f770f032dd81c9bae9f72041952 Mon Sep 17 00:00:00 2001 From: Yaroslava Serdiuk Date: Mon, 23 Jan 2023 10:07:07 +0000 Subject: [PATCH 30/35] Cleanup version from osReserved calculator --- cluster-autoscaler/cloudprovider/gce/gce_reserved.go | 4 ++-- cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go | 2 +- cluster-autoscaler/cloudprovider/gce/os_reserved.go | 4 ++-- cluster-autoscaler/cloudprovider/gce/templates.go | 8 ++++---- cluster-autoscaler/cloudprovider/gce/templates_test.go | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/gce/gce_reserved.go b/cluster-autoscaler/cloudprovider/gce/gce_reserved.go index 19af3ba46b9e..aa235f0b26f4 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_reserved.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_reserved.go @@ -87,7 +87,7 @@ type GceReserved struct{} // CalculateKernelReserved computes how much memory Linux kernel will reserve. 
// TODO(jkaniuk): account for crashkernel reservation on RHEL / CentOS -func (r *GceReserved) CalculateKernelReserved(m MigOsInfo, physicalMemory int64, nodeVersion string) int64 { +func (r *GceReserved) CalculateKernelReserved(m MigOsInfo, physicalMemory int64) int64 { os := m.Os() osDistribution := m.OsDistribution() arch := m.Arch() @@ -270,7 +270,7 @@ func EphemeralStorageOnLocalSSDFilesystemOverheadInBytes(diskCount int64, osDist } // CalculateOSReservedEphemeralStorage estimates how much ephemeral storage OS will reserve and eviction threshold -func (r *GceReserved) CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64, nodeVersion string) int64 { +func (r *GceReserved) CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64) int64 { osDistribution := m.OsDistribution() arch := m.Arch() switch osDistribution { diff --git a/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go b/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go index bd1407a688c0..ccb907b9eb32 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_reserved_test.go @@ -109,7 +109,7 @@ func TestCalculateKernelReservedLinux(t *testing.T) { r := &GceReserved{} t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { m := NewMigOsInfo(OperatingSystemLinux, tc.osDistribution, tc.arch) - reserved := r.CalculateKernelReserved(m, tc.physicalMemory, "") + reserved := r.CalculateKernelReserved(m, tc.physicalMemory) if tc.osDistribution == OperatingSystemDistributionUbuntu { assert.Equal(t, tc.reservedMemory+int64(math.Min(correctionConstant*float64(tc.physicalMemory), maximumCorrectionValue)+ubuntuSpecificOffset), reserved) } else if tc.osDistribution == OperatingSystemDistributionCOS { diff --git a/cluster-autoscaler/cloudprovider/gce/os_reserved.go b/cluster-autoscaler/cloudprovider/gce/os_reserved.go index ff47d826c058..738b51872819 100644 --- a/cluster-autoscaler/cloudprovider/gce/os_reserved.go +++ b/cluster-autoscaler/cloudprovider/gce/os_reserved.go @@ -30,9 +30,9 @@ type MigOsInfo interface { type OsReservedCalculator interface { // CalculateKernelReserved computes how much memory OS kernel will reserve. // NodeVersion parameter is optional. If empty string is passed a result calculated using default node version will be returned. - CalculateKernelReserved(m MigOsInfo, physicalMemory int64, nodeVersion string) int64 + CalculateKernelReserved(m MigOsInfo, physicalMemory int64) int64 // CalculateOSReservedEphemeralStorage estimates how much ephemeral storage OS will reserve and eviction threshold. // NodeVersion parameter is optional. If empty string is passed a result calculated using default node version will be returned. - CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64, nodeVersion string) int64 + CalculateOSReservedEphemeralStorage(m MigOsInfo, diskSize int64) int64 } diff --git a/cluster-autoscaler/cloudprovider/gce/templates.go b/cluster-autoscaler/cloudprovider/gce/templates.go index 6c6c567e3396..ad386e01c96c 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates.go +++ b/cluster-autoscaler/cloudprovider/gce/templates.go @@ -72,7 +72,7 @@ func (t *GceTemplateBuilder) getAcceleratorCount(accelerators []*gce.Accelerator // BuildCapacity builds a list of resource capacities given list of hardware. 
func (t *GceTemplateBuilder) BuildCapacity(m MigOsInfo, cpu int64, mem int64, accelerators []*gce.AcceleratorConfig, - ephemeralStorage int64, ephemeralStorageLocalSSDCount int64, pods *int64, version string, r OsReservedCalculator, extendedResources apiv1.ResourceList) (apiv1.ResourceList, error) { + ephemeralStorage int64, ephemeralStorageLocalSSDCount int64, pods *int64, r OsReservedCalculator, extendedResources apiv1.ResourceList) (apiv1.ResourceList, error) { capacity := apiv1.ResourceList{} if pods == nil { capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) @@ -81,7 +81,7 @@ func (t *GceTemplateBuilder) BuildCapacity(m MigOsInfo, cpu int64, mem int64, ac } capacity[apiv1.ResourceCPU] = *resource.NewQuantity(cpu, resource.DecimalSI) - memTotal := mem - r.CalculateKernelReserved(m, mem, version) + memTotal := mem - r.CalculateKernelReserved(m, mem) capacity[apiv1.ResourceMemory] = *resource.NewQuantity(memTotal, resource.DecimalSI) if accelerators != nil && len(accelerators) > 0 { @@ -93,7 +93,7 @@ func (t *GceTemplateBuilder) BuildCapacity(m MigOsInfo, cpu int64, mem int64, ac if ephemeralStorageLocalSSDCount > 0 { storageTotal = ephemeralStorage - EphemeralStorageOnLocalSSDFilesystemOverheadInBytes(ephemeralStorageLocalSSDCount, m.OsDistribution()) } else { - storageTotal = ephemeralStorage - r.CalculateOSReservedEphemeralStorage(m, ephemeralStorage, version) + storageTotal = ephemeralStorage - r.CalculateOSReservedEphemeralStorage(m, ephemeralStorage) } capacity[apiv1.ResourceEphemeralStorage] = *resource.NewQuantity(int64(math.Max(float64(storageTotal), 0)), resource.DecimalSI) } @@ -232,7 +232,7 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, migOsInfo MigOsInfo, klog.Errorf("could not fetch extended resources from instance template: %v", err) } - capacity, err := t.BuildCapacity(migOsInfo, cpu, mem, template.Properties.GuestAccelerators, ephemeralStorage, ephemeralStorageLocalSsdCount, pods, mig.Version(), reserved, extendedResources) + capacity, err := t.BuildCapacity(migOsInfo, cpu, mem, template.Properties.GuestAccelerators, ephemeralStorage, ephemeralStorageLocalSsdCount, pods, reserved, extendedResources) if err != nil { return nil, err } diff --git a/cluster-autoscaler/cloudprovider/gce/templates_test.go b/cluster-autoscaler/cloudprovider/gce/templates_test.go index 665dfa7e2155..aad078c9ed49 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates_test.go +++ b/cluster-autoscaler/cloudprovider/gce/templates_test.go @@ -282,7 +282,7 @@ func TestBuildNodeFromTemplateSetsResources(t *testing.T) { physicalEphemeralStorageGiB = 0 } migOsInfo := NewMigOsInfo(OperatingSystemLinux, OperatingSystemDistributionCOS, "") - capacity, err := tb.BuildCapacity(migOsInfo, tc.physicalCpu, tc.physicalMemory, tc.accelerators, physicalEphemeralStorageGiB*units.GiB, tc.ephemeralStorageLocalSSDCount, tc.pods, "", &GceReserved{}, tc.extendedResources) + capacity, err := tb.BuildCapacity(migOsInfo, tc.physicalCpu, tc.physicalMemory, tc.accelerators, physicalEphemeralStorageGiB*units.GiB, tc.ephemeralStorageLocalSSDCount, tc.pods, &GceReserved{}, tc.extendedResources) assert.NoError(t, err) assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) if !tc.kubeReserved { @@ -590,7 +590,7 @@ func TestBuildCapacityMemory(t *testing.T) { tb := GceTemplateBuilder{} noAccelerators := make([]*gce.AcceleratorConfig, 0) migOsInfo := NewMigOsInfo(tc.os, OperatingSystemDistributionCOS, "") - buildCapacity, err := tb.BuildCapacity(migOsInfo, 
tc.physicalCpu, tc.physicalMemory, noAccelerators, -1, 0, nil, "", &GceReserved{}, apiv1.ResourceList{}) + buildCapacity, err := tb.BuildCapacity(migOsInfo, tc.physicalCpu, tc.physicalMemory, noAccelerators, -1, 0, nil, &GceReserved{}, apiv1.ResourceList{}) assert.NoError(t, err) expectedCapacity, err := makeResourceList2(tc.physicalCpu, tc.expectedCapacityMemory, 0, 110) assert.NoError(t, err) From 8cbbafea57da3d1e7fc2d2a281a189124e00a373 Mon Sep 17 00:00:00 2001 From: Yaroslava Serdiuk Date: Mon, 23 Jan 2023 11:57:06 +0000 Subject: [PATCH 31/35] Cleanup Version() method for gce Mig --- cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go index 83a275c25ce2..d79c6a9fb0ff 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go @@ -182,7 +182,6 @@ type Mig interface { cloudprovider.NodeGroup GceRef() GceRef - Version() string } type gceMig struct { @@ -193,11 +192,6 @@ type gceMig struct { maxSize int } -// Version return the Mig version. -func (mig *gceMig) Version() string { - return "" -} - // GceRef returns Mig's GceRef func (mig *gceMig) GceRef() GceRef { return mig.gceRef From 0470fdfc354f72d51a4b5c382f552c0dff3fac4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Wr=C3=B3blewski?= Date: Mon, 23 Jan 2023 14:14:46 +0000 Subject: [PATCH 32/35] Clean up DS utils: remove unused cluster snapshot and predicate checker --- cluster-autoscaler/core/scale_up.go | 4 ++-- cluster-autoscaler/core/utils/utils.go | 5 ++--- .../mixed_nodeinfos_processor.go | 2 +- .../utils/daemonset/daemonset.go | 19 +------------------ .../utils/daemonset/daemonset_test.go | 13 +++++-------- 5 files changed, 11 insertions(+), 32 deletions(-) diff --git a/cluster-autoscaler/core/scale_up.go b/cluster-autoscaler/core/scale_up.go index 9d70ad869c40..173d4048db0f 100644 --- a/cluster-autoscaler/core/scale_up.go +++ b/cluster-autoscaler/core/scale_up.go @@ -306,7 +306,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto // If possible replace candidate node-info with node info based on crated node group. The latter // one should be more in line with nodes which will be created by node group. 
- mainCreatedNodeInfo, err := utils.GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, context.PredicateChecker, ignoredTaints) + mainCreatedNodeInfo, err := utils.GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, ignoredTaints) if err == nil { nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo } else { @@ -320,7 +320,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto } for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups { - nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonSets, context.PredicateChecker, ignoredTaints) + nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonSets, ignoredTaints) if err != nil { klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), err) diff --git a/cluster-autoscaler/core/utils/utils.go b/cluster-autoscaler/core/utils/utils.go index 6fc59b21e10b..d4c3b93d87e9 100644 --- a/cluster-autoscaler/core/utils/utils.go +++ b/cluster-autoscaler/core/utils/utils.go @@ -27,7 +27,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/metrics" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" @@ -37,7 +36,7 @@ import ( ) // GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo(). -func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker predicatechecker.PredicateChecker, ignoredTaints taints.TaintKeySet) (*schedulerframework.NodeInfo, errors.AutoscalerError) { +func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (*schedulerframework.NodeInfo, errors.AutoscalerError) { id := nodeGroup.Id() baseNodeInfo, err := nodeGroup.TemplateNodeInfo() if err != nil { @@ -46,7 +45,7 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap labels.UpdateDeprecatedLabels(baseNodeInfo.Node().ObjectMeta.Labels) - pods, err := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets, predicateChecker) + pods, err := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets) if err != nil { return nil, errors.ToAutoscalerError(errors.InternalError, err) } diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go index 78ff16f3cc90..f6008e8012e8 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go @@ -143,7 +143,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, // No good template, trying to generate one. This is called only if there are no // working nodes in the node groups. By default CA tries to use a real-world example. 
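As the commit subject says, the snapshot and predicate checker were unused: whether a DaemonSet pod belongs on a template node is decided directly by the daemon controller helper, as the simplified body of GetDaemonSetPodsForNode further down shows. A minimal fragment sketching that decision, assuming nodeInfo (*schedulerframework.NodeInfo) and daemonsets ([]*appsv1.DaemonSet) are in scope, as they are at the call sites in this patch:

// Sketch only, not part of the patch: pick out the DaemonSets whose pods should run
// on the template node, using the same upstream helper the cleaned-up code relies on.
var wanted []*appsv1.DaemonSet
for _, ds := range daemonsets {
	if shouldRun, _ := daemon.NodeShouldRunDaemonPod(nodeInfo.Node(), ds); shouldRun {
		wanted = append(wanted, ds)
	}
}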
- nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, ctx.PredicateChecker, ignoredTaints) + nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, ignoredTaints) if err != nil { if err == cloudprovider.ErrNotImplemented { continue diff --git a/cluster-autoscaler/utils/daemonset/daemonset.go b/cluster-autoscaler/utils/daemonset/daemonset.go index 8eb17499494c..193ea07f671a 100644 --- a/cluster-autoscaler/utils/daemonset/daemonset.go +++ b/cluster-autoscaler/utils/daemonset/daemonset.go @@ -20,9 +20,6 @@ import ( "fmt" "math/rand" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" - appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/controller/daemon" @@ -36,22 +33,8 @@ const ( ) // GetDaemonSetPodsForNode returns daemonset nodes for the given pod. -func GetDaemonSetPodsForNode(nodeInfo *schedulerframework.NodeInfo, daemonsets []*appsv1.DaemonSet, predicateChecker predicatechecker.PredicateChecker) ([]*apiv1.Pod, error) { +func GetDaemonSetPodsForNode(nodeInfo *schedulerframework.NodeInfo, daemonsets []*appsv1.DaemonSet) ([]*apiv1.Pod, error) { result := make([]*apiv1.Pod, 0) - - // here we can use empty snapshot - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - - // add a node with pods - node info is created by cloud provider, - // we don't know whether it'll have pods or not. - var pods []*apiv1.Pod - for _, podInfo := range nodeInfo.Pods { - pods = append(pods, podInfo.Pod) - } - if err := clusterSnapshot.AddNodeWithPods(nodeInfo.Node(), pods); err != nil { - return nil, err - } - for _, ds := range daemonsets { shouldRun, _ := daemon.NodeShouldRunDaemonPod(nodeInfo.Node(), ds) if shouldRun { diff --git a/cluster-autoscaler/utils/daemonset/daemonset_test.go b/cluster-autoscaler/utils/daemonset/daemonset_test.go index 2a0567fd56fb..18a27f04805e 100644 --- a/cluster-autoscaler/utils/daemonset/daemonset_test.go +++ b/cluster-autoscaler/utils/daemonset/daemonset_test.go @@ -21,7 +21,6 @@ import ( "testing" "time" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" appsv1 "k8s.io/api/apps/v1" @@ -39,36 +38,34 @@ func TestGetDaemonSetPodsForNode(t *testing.T) { nodeInfo := schedulerframework.NewNodeInfo() nodeInfo.SetNode(node) - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(t, err) ds1 := newDaemonSet("ds1", "0.1", "100M", nil) ds2 := newDaemonSet("ds2", "0.1", "100M", map[string]string{"foo": "bar"}) ds3 := newDaemonSet("ds1", "4", "4000M", nil) { - daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds1, ds2}, predicateChecker) + daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds1, ds2}) assert.NoError(t, err) assert.Equal(t, 1, len(daemonSets)) assert.True(t, strings.HasPrefix(daemonSets[0].Name, "ds1")) } { - daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds1}, predicateChecker) + daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds1}) assert.NoError(t, err) assert.Equal(t, 1, len(daemonSets)) } { - daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds2}, predicateChecker) + daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds2}) assert.NoError(t, err) assert.Equal(t, 0, len(daemonSets)) } { - daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds3}, predicateChecker) + daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{ds3}) assert.NoError(t, err) assert.Equal(t, 1, len(daemonSets)) } { - daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{}, predicateChecker) + daemonSets, err := GetDaemonSetPodsForNode(nodeInfo, []*appsv1.DaemonSet{}) assert.NoError(t, err) assert.Equal(t, 0, len(daemonSets)) } From d4b812e936579856201c4cf6bdc8681d661f2b17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Wr=C3=B3blewski?= Date: Mon, 23 Jan 2023 17:14:03 +0000 Subject: [PATCH 33/35] Add filtering out DS pods from scale-up, refactor default pod list processor --- .../filter_out_daemon_sets.go | 54 +++++++++++++ .../filter_out_daemon_sets_test.go | 77 +++++++++++++++++++ .../podlistprocessor/pod_list_processor.go | 31 +++++--- cluster-autoscaler/core/test/common.go | 5 +- cluster-autoscaler/main.go | 5 +- 5 files changed, 152 insertions(+), 20 deletions(-) create mode 100644 cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets.go create mode 100644 cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets_test.go diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets.go b/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets.go new file mode 100644 index 000000000000..d863d8128089 --- /dev/null +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package podlistprocessor
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+	klog "k8s.io/klog/v2"
+)
+
+type filterOutDaemonSetPodListProcessor struct {
+}
+
+// NewFilterOutDaemonSetPodListProcessor creates a PodListProcessor filtering out daemon set pods
+func NewFilterOutDaemonSetPodListProcessor() *filterOutDaemonSetPodListProcessor {
+	return &filterOutDaemonSetPodListProcessor{}
+}
+
+// Process filters out pods which are daemon set pods.
+func (p *filterOutDaemonSetPodListProcessor) Process(context *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+	// Scale-up cannot help unschedulable Daemon Set pods, as those require a specific node
+	// for scheduling. To improve that, we filter them out here, as the CA won't be able to
+	// help them, so there is no point in passing them to the scale-up logic.
+
+	klog.V(4).Infof("Filtering out daemon set pods")
+
+	var nonDaemonSetPods []*apiv1.Pod
+	for _, pod := range unschedulablePods {
+		if !podutils.IsDaemonSetPod(pod) {
+			nonDaemonSetPods = append(nonDaemonSetPods, pod)
+		}
+	}
+
+	klog.V(4).Infof("Filtered out %v daemon set pods, %v unschedulable pods left", len(unschedulablePods)-len(nonDaemonSetPods), len(nonDaemonSetPods))
+	return nonDaemonSetPods, nil
+}
+
+func (p *filterOutDaemonSetPodListProcessor) CleanUp() {
+}
diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets_test.go
new file mode 100644
index 000000000000..ca99f6f698cf
--- /dev/null
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets_test.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
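A minimal usage fragment for the new processor; the test further down drives it the same way. unschedulablePods is a hypothetical []*apiv1.Pod, and nil is passed for the autoscaling context because Process does not use it:

// Sketch only, not part of the patch: run the DaemonSet filter in isolation.
processor := NewFilterOutDaemonSetPodListProcessor()
nonDSPods, err := processor.Process(nil, unschedulablePods)
if err != nil {
	return err
}
// nonDSPods now contains only the pods that a scale-up could actually help.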
+*/ + +package podlistprocessor + +import ( + "testing" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/utils/test" +) + +func TestFilterOutDaemonSetPodListProcessor(t *testing.T) { + testCases := []struct { + name string + pods []*apiv1.Pod + wantPods []*apiv1.Pod + }{ + { + name: "no pods", + }, + { + name: "single non-DS pod", + pods: []*apiv1.Pod{ + test.BuildTestPod("p", 1000, 1), + }, + wantPods: []*apiv1.Pod{ + test.BuildTestPod("p", 1000, 1), + }, + }, + { + name: "single DS pod", + pods: []*apiv1.Pod{ + test.SetDSPodSpec(test.BuildTestPod("p", 1000, 1)), + }, + }, + { + name: "mixed DS and non-DS pods", + pods: []*apiv1.Pod{ + test.BuildTestPod("p1", 1000, 1), + test.SetDSPodSpec(test.BuildTestPod("p2", 1000, 1)), + test.SetDSPodSpec(test.BuildTestPod("p3", 1000, 1)), + test.BuildTestPod("p4", 1000, 1), + test.BuildTestPod("p5", 1000, 1), + test.SetDSPodSpec(test.BuildTestPod("p6", 1000, 1)), + }, + wantPods: []*apiv1.Pod{ + test.BuildTestPod("p1", 1000, 1), + test.BuildTestPod("p4", 1000, 1), + test.BuildTestPod("p5", 1000, 1), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + processor := NewFilterOutDaemonSetPodListProcessor() + pods, err := processor.Process(nil, tc.pods) + assert.NoError(t, err) + assert.Equal(t, tc.wantPods, pods) + }) + } +} diff --git a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go index 5ef258b8b796..d357d00bde18 100644 --- a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go +++ b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go @@ -19,33 +19,40 @@ package podlistprocessor import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/processors/pods" + "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" ) type defaultPodListProcessor struct { - currentlyDrainedNodes *currentlyDrainedNodesPodListProcessor - filterOutSchedulable *filterOutSchedulablePodListProcessor + processors []pods.PodListProcessor } // NewDefaultPodListProcessor returns a default implementation of the pod list // processor, which wraps and sequentially runs other sub-processors. 
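Before the constructor change just below, a minimal fragment showing how the refactored default processor is built and driven; predicateChecker, autoscalingCtx and unschedulablePods are assumed to be in scope (main.go and the test helpers below construct it the same way):

// Sketch only, not part of the patch: the default pod list processor is now an ordered
// chain, so adding a stage such as the DaemonSet filter means appending one entry.
p := podlistprocessor.NewDefaultPodListProcessor(predicateChecker)
pods, err := p.Process(autoscalingCtx, unschedulablePods)
if err != nil {
	return err
}
// pods has passed the drained-nodes, schedulability and DaemonSet filters, in that order.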
-func NewDefaultPodListProcessor(currentlyDrainedNodes *currentlyDrainedNodesPodListProcessor, filterOutSchedulable *filterOutSchedulablePodListProcessor) *defaultPodListProcessor { +func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker) *defaultPodListProcessor { return &defaultPodListProcessor{ - currentlyDrainedNodes: currentlyDrainedNodes, - filterOutSchedulable: filterOutSchedulable, + processors: []pods.PodListProcessor{ + NewCurrentlyDrainedNodesPodListProcessor(), + NewFilterOutSchedulablePodListProcessor(predicateChecker), + NewFilterOutDaemonSetPodListProcessor(), + }, } } // Process runs sub-processors sequentially func (p *defaultPodListProcessor) Process(ctx *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) { - unschedulablePods, err := p.currentlyDrainedNodes.Process(ctx, unschedulablePods) - if err != nil { - return nil, err + var err error + for _, processor := range p.processors { + unschedulablePods, err = processor.Process(ctx, unschedulablePods) + if err != nil { + return nil, err + } } - - return p.filterOutSchedulable.Process(ctx, unschedulablePods) + return unschedulablePods, nil } func (p *defaultPodListProcessor) CleanUp() { - p.currentlyDrainedNodes.CleanUp() - p.filterOutSchedulable.CleanUp() + for _, processor := range p.processors { + processor.CleanUp() + } } diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go index 8a57ec508dea..25fd91106d45 100644 --- a/cluster-autoscaler/core/test/common.go +++ b/cluster-autoscaler/core/test/common.go @@ -136,10 +136,7 @@ func ExtractPodNames(pods []*apiv1.Pod) []string { // NewTestProcessors returns a set of simple processors for use in tests. func NewTestProcessors(context *context.AutoscalingContext) *processors.AutoscalingProcessors { return &processors.AutoscalingProcessors{ - PodListProcessor: podlistprocessor.NewDefaultPodListProcessor( - podlistprocessor.NewCurrentlyDrainedNodesPodListProcessor(), - podlistprocessor.NewFilterOutSchedulablePodListProcessor(context.PredicateChecker), - ), + PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(context.PredicateChecker), NodeGroupListProcessor: &nodegroups.NoOpNodeGroupListProcessor{}, NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{}), ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(), diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index 18a30568c91d..961ddd16cab9 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -400,10 +400,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter opts.Processors = ca_processors.DefaultProcessors() opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime) - opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor( - podlistprocessor.NewCurrentlyDrainedNodesPodListProcessor(), - podlistprocessor.NewFilterOutSchedulablePodListProcessor(opts.PredicateChecker), - ) + opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker) if autoscalingOptions.ParallelDrain { sdProcessor := nodes.NewScaleDownCandidatesSortingProcessor() opts.Processors.ScaleDownNodeProcessor = sdProcessor From c35eb6491d3c6b65dab5ad9ee3468cb439630ecd Mon Sep 17 00:00:00 2001 From: Jayant Jain Date: Wed, 25 Jan 2023 14:13:29 +0000 Subject: [PATCH 34/35] update vendor to 
v1.27.0-alpha.1 --- cluster-autoscaler/go.mod | 125 +- cluster-autoscaler/go.sum | 204 +- .../Microsoft/hcsshim/internal/cow/cow.go | 6 + .../Microsoft/hcsshim/internal/hcs/system.go | 12 +- .../hcsshim/internal/winapi/utils.go | 25 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1418 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../internal/shareddefaults/shared_config.go | 18 +- .../shared_config_resolve_home.go | 18 + .../shared_config_resolve_home_go1.12.go | 13 + .../private/protocol/json/jsonutil/build.go | 19 +- .../protocol/json/jsonutil/unmarshal.go | 13 + .../protocol/query/queryutil/queryutil.go | 34 +- .../aws-sdk-go/private/protocol/rest/build.go | 18 +- .../private/protocol/rest/unmarshal.go | 18 +- .../private/protocol/xml/xmlutil/build.go | 32 +- .../private/protocol/xml/xmlutil/unmarshal.go | 18 +- .../aws/aws-sdk-go/service/autoscaling/api.go | 432 +- .../aws/aws-sdk-go/service/ec2/api.go | 3341 ++++- .../aws/aws-sdk-go/service/elbv2/api.go | 158 +- .../aws/aws-sdk-go/service/sts/api.go | 213 +- .../go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonreference/.golangci.yml | 13 +- .../go-openapi/jsonreference/.travis.yml | 24 - .../jsonreference/internal/normalize_url.go | 17 +- .../github.com/go-openapi/swag/.gitattributes | 2 + .../github.com/go-openapi/swag/.golangci.yml | 15 + .../github.com/go-openapi/swag/.travis.yml | 37 - .../vendor/github.com/go-openapi/swag/doc.go | 15 +- .../vendor/github.com/go-openapi/swag/file.go | 33 + .../github.com/go-openapi/swag/loading.go | 11 +- .../github.com/go-openapi/swag/post_go18.go | 1 + .../github.com/go-openapi/swag/post_go19.go | 1 + .../github.com/go-openapi/swag/pre_go18.go | 1 + .../github.com/go-openapi/swag/pre_go19.go | 1 + .../vendor/github.com/go-openapi/swag/util.go | 17 +- .../vendor/github.com/go-openapi/swag/yaml.go | 252 +- .../google/cadvisor/accelerators/nvidia.go | 276 - .../cadvisor/container/common/helpers.go | 2 +- .../google/cadvisor/container/factory.go | 2 - .../google/cadvisor/info/v1/machine.go | 3 + .../google/cadvisor/machine/info.go | 6 + .../google/cadvisor/manager/container.go | 15 +- .../google/cadvisor/manager/manager.go | 15 - .../google/cadvisor/metrics/prometheus.go | 54 +- .../github.com/google/cel-go/cel/program.go | 5 +- .../vendor/github.com/google/uuid/hash.go | 4 +- .../vendor/github.com/google/uuid/null.go | 118 + .../vendor/github.com/google/uuid/sql.go | 2 +- .../vendor/github.com/google/uuid/uuid.go | 55 +- .../vendor/github.com/google/uuid/version4.go | 35 +- .../mailru/easyjson/jlexer/lexer.go | 14 + .../mindprince/gonvml/.travis.gofmt.sh | 7 - .../github.com/mindprince/gonvml/.travis.yml | 9 - .../github.com/mindprince/gonvml/LICENSE | 202 - .../github.com/mindprince/gonvml/Makefile | 23 - .../github.com/mindprince/gonvml/NVML_NOTICE | 32 - .../github.com/mindprince/gonvml/README.md | 19 - .../github.com/mindprince/gonvml/bindings.go | 524 - .../mindprince/gonvml/bindings_nocgo.go | 115 - .../github.com/mindprince/gonvml/nvml.h | 5605 -------- .../vendor/github.com/moby/ipvs/.golangci.yml | 8 + .../vendor/github.com/moby/ipvs/LICENSE | 8 +- .../vendor/github.com/moby/ipvs/README.md | 3 +- .../ipvs/{constants.go => constants_linux.go} | 26 +- .../moby/ipvs/{ipvs.go => ipvs_linux.go} | 3 - .../ipvs/{netlink.go => netlink_linux.go} 
| 58 +- .../runtime-spec/specs-go/config.go | 107 +- .../github.com/sirupsen/logrus/README.md | 4 +- .../github.com/sirupsen/logrus/buffer_pool.go | 9 - .../github.com/sirupsen/logrus/entry.go | 21 +- .../github.com/sirupsen/logrus/logger.go | 13 + .../github.com/stretchr/objx/Taskfile.yml | 2 +- .../github.com/vishvananda/netns/README.md | 12 + .../github.com/vishvananda/netns/doc.go | 9 + .../vishvananda/netns/netns_linux.go | 108 +- .../{netns_unspecified.go => netns_others.go} | 17 + .../netns/{netns.go => nshandle_linux.go} | 10 +- .../vishvananda/netns/nshandle_others.go | 45 + .../github.com/vmware/govmomi/CONTRIBUTORS | 156 +- .../github.com/vmware/govmomi/find/finder.go | 104 +- .../vmware/govmomi/history/collector.go | 91 + .../vmware/govmomi/internal/helpers.go | 63 + .../vmware/govmomi/internal/methods.go | 123 + .../vmware/govmomi/internal/types.go | 270 + .../govmomi/internal/version/version.go | 25 + .../github.com/vmware/govmomi/list/lister.go | 65 + .../vmware/govmomi/lookup/client.go | 26 +- .../github.com/vmware/govmomi/nfc/lease.go | 9 +- .../vmware/govmomi/nfc/lease_updater.go | 4 +- .../govmomi/object/authorization_manager.go | 47 + .../object/cluster_compute_resource.go | 14 + .../vmware/govmomi/object/common.go | 34 +- .../object/customization_spec_manager.go | 7 + .../vmware/govmomi/object/datastore.go | 5 + .../vmware/govmomi/object/datastore_file.go | 6 +- .../vmware/govmomi/object/datastore_path.go | 2 +- .../govmomi/object/diagnostic_manager.go | 6 +- .../object/distributed_virtual_portgroup.go | 12 +- .../object/distributed_virtual_switch.go | 40 +- .../vmware/govmomi/object/folder.go | 14 + .../govmomi/object/history_collector.go | 72 - .../govmomi/object/host_config_manager.go | 151 +- .../govmomi/object/host_datastore_system.go | 16 + .../govmomi/object/host_network_system.go | 8 +- .../govmomi/object/host_storage_system.go | 26 + .../vmware/govmomi/object/host_system.go | 19 +- .../vmware/govmomi/object/network.go | 14 +- .../govmomi/object/network_reference.go | 2 +- .../vmware/govmomi/object/opaque_network.go | 31 +- .../vmware/govmomi/object/resource_pool.go | 13 + .../vmware/govmomi/object/search_index.go | 85 + .../github.com/vmware/govmomi/object/task.go | 42 +- .../vmware/govmomi/object/tenant_manager.go | 78 + .../vmware/govmomi/object/virtual_app.go | 16 + .../govmomi/object/virtual_device_list.go | 76 +- .../vmware/govmomi/object/virtual_machine.go | 293 +- .../vmware_distributed_virtual_switch.go | 4 + .../github.com/vmware/govmomi/pbm/client.go | 63 +- .../vmware/govmomi/pbm/methods/methods.go | 2 +- .../github.com/vmware/govmomi/pbm/pbm_util.go | 2 + .../vmware/govmomi/pbm/types/enum.go | 89 +- .../github.com/vmware/govmomi/pbm/types/if.go | 2 +- .../vmware/govmomi/pbm/types/types.go | 35 +- .../vmware/govmomi/property/collector.go | 14 +- .../vmware/govmomi/property/filter.go | 35 +- .../vmware/govmomi/property/wait.go | 24 +- .../vmware/govmomi/session/keep_alive.go | 106 +- .../govmomi/session/keepalive/handler.go | 204 + .../vmware/govmomi/session/manager.go | 31 +- .../github.com/vmware/govmomi/sts/client.go | 14 +- .../vmware/govmomi/sts/internal/types.go | 6 +- .../github.com/vmware/govmomi/sts/signer.go | 63 +- .../github.com/vmware/govmomi/task/error.go | 1 + .../vmware/govmomi/task/history_collector.go | 89 + .../github.com/vmware/govmomi/task/manager.go | 61 + 
.../github.com/vmware/govmomi/task/wait.go | 17 +- .../vmware/govmomi/vapi/internal/internal.go | 101 +- .../vmware/govmomi/vapi/rest/client.go | 227 +- .../vmware/govmomi/vapi/rest/resource.go | 114 + .../vmware/govmomi/vapi/tags/categories.go | 29 +- .../vmware/govmomi/vapi/tags/errors.go | 55 + .../govmomi/vapi/tags/tag_association.go | 280 +- .../vmware/govmomi/vapi/tags/tags.go | 18 +- .../vmware/govmomi/view/container_view.go | 145 + .../vmware/govmomi/view/list_view.go | 62 + .../govmomi/view/managed_object_view.go | 52 + .../github.com/vmware/govmomi/view/manager.go | 69 + .../vmware/govmomi/view/task_view.go | 125 + .../github.com/vmware/govmomi/vim25/client.go | 51 +- .../vmware/govmomi/vim25/debug/debug.go | 44 +- .../vmware/govmomi/vim25/debug/file.go | 75 + .../vmware/govmomi/vim25/debug/log.go | 49 + .../vmware/govmomi/vim25/methods/methods.go | 3218 +++-- .../govmomi/vim25/methods/unreleased.go | 44 + .../github.com/vmware/govmomi/vim25/mo/mo.go | 83 +- .../vmware/govmomi/vim25/mo/retrieve.go | 81 +- .../vmware/govmomi/vim25/mo/type_info.go | 7 +- .../vmware/govmomi/vim25/progress/reader.go | 5 +- .../github.com/vmware/govmomi/vim25/retry.go | 70 +- .../vmware/govmomi/vim25/soap/client.go | 166 +- .../vmware/govmomi/vim25/soap/debug.go | 48 +- .../vmware/govmomi/vim25/soap/error.go | 10 + .../vmware/govmomi/vim25/types/enum.go | 1135 +- .../vmware/govmomi/vim25/types/fault.go | 11 + .../vmware/govmomi/vim25/types/helpers.go | 225 +- .../vmware/govmomi/vim25/types/if.go | 124 +- .../vmware/govmomi/vim25/types/types.go | 4633 ++++++- .../vmware/govmomi/vim25/types/unreleased.go | 121 + .../vmware/govmomi/vim25/xml/LICENSE | 2 +- .../vmware/govmomi/vim25/xml/marshal.go | 335 +- .../vmware/govmomi/vim25/xml/read.go | 256 +- .../vmware/govmomi/vim25/xml/typeinfo.go | 69 +- .../vmware/govmomi/vim25/xml/xml.go | 289 +- .../x/sync/singleflight/singleflight.go | 11 +- .../grpc/attributes/attributes.go | 2 +- .../vendor/google.golang.org/grpc/backoff.go | 2 +- .../grpc/balancer/balancer.go | 25 +- .../grpc/balancer/base/balancer.go | 4 +- .../grpc/balancer/conn_state_evaluator.go | 12 +- .../grpc/balancer/roundrobin/roundrobin.go | 16 +- .../grpc/balancer_conn_wrappers.go | 71 +- .../grpc_binarylog_v1/binarylog.pb.go | 13 +- .../grpc/channelz/channelz.go | 2 +- .../google.golang.org/grpc/clientconn.go | 135 +- .../grpc/credentials/credentials.go | 20 +- .../google.golang.org/grpc/credentials/tls.go | 2 +- .../google.golang.org/grpc/dialoptions.go | 15 +- .../grpc/encoding/encoding.go | 5 +- .../grpc/encoding/gzip/gzip.go | 2 +- .../grpc/grpclog/loggerv2.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 17 + .../grpc/internal/binarylog/binarylog.go | 20 +- .../grpc/internal/binarylog/env_config.go | 20 +- .../grpc/internal/binarylog/method_logger.go | 19 +- .../grpc/internal/channelz/types.go | 16 +- .../grpc/internal/envconfig/envconfig.go | 8 +- .../grpc/internal/envconfig/observability.go | 36 + .../grpc/internal/envconfig/xds.go | 7 +- .../grpc/internal/grpclog/grpclog.go | 2 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/method.go | 1 - .../grpc/internal/internal.go | 42 +- .../grpc/internal/resolver/unix/unix.go | 5 +- .../internal/serviceconfig/serviceconfig.go | 8 +- .../grpc/internal/status/status.go | 10 + .../grpc/internal/transport/controlbuf.go | 4 +- .../grpc/internal/transport/handler_server.go | 8 +- 
.../grpc/internal/transport/http2_client.go | 194 +- .../grpc/internal/transport/http2_server.go | 19 +- .../grpc/internal/transport/http_util.go | 23 +- .../grpc/internal/transport/transport.go | 14 +- .../grpc/metadata/metadata.go | 73 +- .../google.golang.org/grpc/picker_wrapper.go | 7 +- .../google.golang.org/grpc/preloader.go | 2 +- .../grpc/resolver/resolver.go | 14 +- .../vendor/google.golang.org/grpc/rpc_util.go | 39 +- .../vendor/google.golang.org/grpc/server.go | 147 +- .../grpc/serviceconfig/serviceconfig.go | 2 +- .../google.golang.org/grpc/status/status.go | 12 +- .../vendor/google.golang.org/grpc/stream.go | 133 +- .../vendor/google.golang.org/grpc/tap/tap.go | 2 +- .../vendor/google.golang.org/grpc/version.go | 2 +- .../vendor/google.golang.org/grpc/vet.sh | 6 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../apps/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v2/types_swagger_doc_generated.go | 2 +- .../v2beta1/types_swagger_doc_generated.go | 2 +- .../v2beta2/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/batch/v1/generated.proto | 26 +- .../vendor/k8s.io/api/batch/v1/types.go | 26 +- .../batch/v1/types_swagger_doc_generated.go | 24 +- .../k8s.io/api/batch/v1beta1/generated.proto | 1 + .../vendor/k8s.io/api/batch/v1beta1/types.go | 1 + .../v1beta1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/certificates/v1/types.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- .../k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 2 +- .../vendor/k8s.io/api/core/v1/generated.pb.go | 1822 +-- .../vendor/k8s.io/api/core/v1/generated.proto | 15 +- .../vendor/k8s.io/api/core/v1/toleration.go | 14 +- .../vendor/k8s.io/api/core/v1/types.go | 58 +- .../core/v1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/zz_generated.deepcopy.go | 2 +- .../k8s.io/api/discovery/v1/generated.proto | 13 +- .../vendor/k8s.io/api/discovery/v1/types.go | 29 +- .../v1/types_swagger_doc_generated.go | 12 +- .../api/discovery/v1beta1/generated.proto | 13 +- .../k8s.io/api/discovery/v1beta1/types.go | 30 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 11046 +++++----------- .../api/extensions/v1beta1/generated.proto | 287 - .../k8s.io/api/extensions/v1beta1/register.go | 2 - .../k8s.io/api/extensions/v1beta1/types.go | 383 - .../v1beta1/types_swagger_doc_generated.go | 162 +- 
.../v1beta1/zz_generated.deepcopy.go | 366 - .../zz_generated.prerelease-lifecycle.go | 48 - .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../v1beta3/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/networking/v1/generated.proto | 176 +- .../vendor/k8s.io/api/networking/v1/types.go | 188 +- .../v1/types_swagger_doc_generated.go | 120 +- .../api/networking/v1alpha1/generated.proto | 18 +- .../k8s.io/api/networking/v1alpha1/types.go | 20 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../api/networking/v1beta1/generated.proto | 78 +- .../k8s.io/api/networking/v1beta1/types.go | 87 +- .../v1beta1/types_swagger_doc_generated.go | 72 +- .../vendor/k8s.io/api/node/v1/generated.proto | 10 +- .../vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- .../vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- .../vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../policy/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/scheduling/v1/generated.proto | 4 +- .../vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1beta1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../k8s.io/api/storage/v1/generated.proto | 125 +- .../vendor/k8s.io/api/storage/v1/types.go | 131 +- .../storage/v1/types_swagger_doc_generated.go | 82 +- .../api/storage/v1alpha1/generated.proto | 38 +- .../k8s.io/api/storage/v1alpha1/types.go | 41 +- .../v1alpha1/types_swagger_doc_generated.go | 38 +- .../api/storage/v1beta1/generated.proto | 111 +- .../k8s.io/api/storage/v1beta1/types.go | 118 +- .../v1beta1/types_swagger_doc_generated.go | 78 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +- .../meta/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 131 +- .../pkg/runtime/schema/group_version.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +- .../pkg/runtime/serializer/codec_factory.go | 3 +- .../pkg/util/httpstream/spdy/roundtripper.go | 12 +- .../apimachinery/pkg/util/mergepatch/util.go | 3 +- .../pkg/util/strategicpatch/OWNERS | 1 + .../pkg/util/validation/validation.go | 8 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 109 +- .../configuration/mutating_webhook_manager.go | 72 +- .../validating_webhook_manager.go | 73 +- .../validatingadmissionpolicy/controller.go | 188 +- .../controller_reconcile.go | 180 +- .../internal/generic/controller.go | 41 +- .../k8s.io/apiserver/pkg/apis/config/types.go | 21 +- .../apiserver/pkg/apis/config/v1/types.go | 21 +- .../k8s.io/apiserver/pkg/audit/evaluator.go | 16 +- .../apiserver/pkg/audit/policy/checker.go | 32 +- 
.../k8s.io/apiserver/pkg/endpoints/OWNERS | 2 + .../apiserver/pkg/endpoints/discovery/OWNERS | 5 + .../endpoints/discovery/aggregated/etag.go | 3 +- .../apiserver/pkg/endpoints/filters/audit.go | 10 +- .../pkg/endpoints/filters/authorization.go | 2 +- .../pkg/endpoints/filters/impersonation.go | 4 +- .../apiserver/pkg/endpoints/groupversion.go | 4 +- .../handlers/fieldmanager/admission.go | 2 +- .../handlers/fieldmanager/fieldmanager.go | 225 +- .../{ => internal}/buildmanagerinfo.go | 5 +- .../{ => internal}/capmanagers.go | 5 +- .../fieldmanager/internal/fieldmanager.go | 206 + .../fieldmanager/internal/lastapplied.go | 50 + .../{ => internal}/lastappliedmanager.go | 5 +- .../{ => internal}/lastappliedupdater.go | 27 +- .../{ => internal}/managedfieldsupdater.go | 5 +- .../handlers/fieldmanager/internal/manager.go | 52 + .../{ => internal}/skipnonapplied.go | 2 +- .../fieldmanager/{ => internal}/stripmeta.go | 2 +- .../{ => internal}/structuredmerge.go | 7 +- .../fieldmanager/internal/typeconverter.go | 112 + .../{ => internal}/versionconverter.go | 24 +- .../handlers/fieldmanager/scalehandler.go | 6 +- .../handlers/fieldmanager/typeconverter.go | 110 +- .../apiserver/pkg/endpoints/installer.go | 8 + .../pkg/endpoints/metrics/metrics.go | 44 +- .../pkg/endpoints/request/requestinfo.go | 2 +- .../apiserver/pkg/features/kube_features.go | 2 +- .../pkg/registry/generic/registry/store.go | 15 + .../apiserver/pkg/registry/rest/rest.go | 6 + .../k8s.io/apiserver/pkg/server/config.go | 104 +- .../server/egressselector/egress_selector.go | 5 + .../apiserver/pkg/server/filters/cors.go | 92 +- .../apiserver/pkg/server/filters/waitgroup.go | 36 +- .../apiserver/pkg/server/filters/wrap.go | 2 +- .../apiserver/pkg/server/genericapiserver.go | 35 +- .../pkg/server/options/authorization.go | 5 +- .../server/options/encryptionconfig/config.go | 122 +- .../encryptionconfig/controller/controller.go | 47 +- .../apiserver/pkg/server/options/etcd.go | 24 +- .../apiserver/pkg/server/options/feature.go | 5 + .../pkg/server/options/server_run_options.go | 6 - .../pkg/server/routes/debugsocket.go | 82 + .../apiserver/pkg/storage/cacher/cacher.go | 13 +- .../pkg/storage/cacher/watch_cache.go | 37 +- .../pkg/storage/etcd3/latency_tracker.go | 3 +- .../apiserver/pkg/storage/etcd3/store.go | 4 + .../value/encrypt/envelope/kmsv2/envelope.go | 71 +- .../encrypt/envelope/kmsv2/grpc_service.go | 13 +- .../encrypt/envelope/kmsv2/v2alpha1/api.pb.go | 31 +- .../encrypt/envelope/kmsv2/v2alpha1/api.proto | 3 +- .../pkg/util/flowcontrol/conc_alloc.go | 11 +- .../request/list_work_estimator.go | 5 +- .../pkg/util/flowcontrol/watch_tracker.go | 5 +- .../apiserver/pkg/util/openapi/enablement.go | 2 +- .../apiserver/pkg/util/webhook/webhook.go | 2 +- .../v1alpha1/admissionpolicyspec.go | 75 - .../v1alpha1/paramsource.go | 48 - .../admissionregistration/v1alpha1/rule.go | 76 - .../v1alpha1/rulewithoperations.go | 85 - .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/rulewithoperations.go | 85 - .../autoscaling/v2/podresourcemetricsource.go | 52 - .../core/v1/servicespec.go | 8 +- .../extensions/v1beta1/allowedcsidriver.go | 39 - .../extensions/v1beta1/allowedflexvolume.go | 39 - .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../extensions/v1beta1/podsecuritypolicy.go | 247 - .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - 
.../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../applyconfigurations/internal/internal.go | 248 +- .../meta/v1/groupversionkind.go | 57 - .../applyconfigurations/meta/v1/listmeta.go | 66 - .../applyconfigurations/meta/v1/status.go | 142 - .../meta/v1/statuscause.go | 61 - .../meta/v1/statusdetails.go | 93 - .../client-go/discovery/discovery_client.go | 2 +- .../client-go/discovery/fake/discovery.go | 5 +- .../dynamic/dynamicinformer/informer.go | 9 +- .../vendor/k8s.io/client-go/informers/doc.go | 18 + .../informers/extensions/v1beta1/interface.go | 7 - .../extensions/v1beta1/podsecuritypolicy.go | 89 - .../k8s.io/client-go/informers/generic.go | 2 - .../vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../fake/fake_mutatingwebhookconfiguration.go | 59 +- .../fake_validatingwebhookconfiguration.go | 59 +- .../fake/fake_validatingadmissionpolicy.go | 5 +- .../fake_validatingadmissionpolicybinding.go | 5 +- .../fake/fake_mutatingwebhookconfiguration.go | 5 +- .../fake_validatingwebhookconfiguration.go | 5 +- .../v1alpha1/fake/fake_storageversion.go | 5 +- .../apps/v1/fake/fake_controllerrevision.go | 59 +- .../typed/apps/v1/fake/fake_daemonset.go | 71 +- .../typed/apps/v1/fake/fake_deployment.go | 77 +- .../typed/apps/v1/fake/fake_replicaset.go | 77 +- .../typed/apps/v1/fake/fake_statefulset.go | 77 +- .../v1beta1/fake/fake_controllerrevision.go | 5 +- .../apps/v1beta1/fake/fake_deployment.go | 5 +- .../apps/v1beta1/fake/fake_statefulset.go | 5 +- .../v1beta2/fake/fake_controllerrevision.go | 5 +- .../typed/apps/v1beta2/fake/fake_daemonset.go | 5 +- .../apps/v1beta2/fake/fake_deployment.go | 5 +- .../apps/v1beta2/fake/fake_replicaset.go | 5 +- .../apps/v1beta2/fake/fake_statefulset.go | 5 +- .../v1/fake/fake_tokenreview.go | 5 +- .../v1alpha1/fake/fake_selfsubjectreview.go | 5 +- .../v1beta1/fake/fake_tokenreview.go | 5 +- .../v1/fake/fake_localsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectrulesreview.go | 5 +- .../v1/fake/fake_subjectaccessreview.go | 5 +- .../fake/fake_localsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectrulesreview.go | 5 +- .../v1beta1/fake/fake_subjectaccessreview.go | 5 +- .../v1/fake/fake_horizontalpodautoscaler.go | 71 +- .../v2/fake/fake_horizontalpodautoscaler.go | 5 +- .../fake/fake_horizontalpodautoscaler.go | 5 +- .../fake/fake_horizontalpodautoscaler.go | 5 +- .../typed/batch/v1/fake/fake_cronjob.go | 71 +- .../typed/batch/v1/fake/fake_job.go | 71 +- .../typed/batch/v1beta1/fake/fake_cronjob.go | 5 +- .../v1/fake/fake_certificatesigningrequest.go | 77 +- .../fake/fake_certificatesigningrequest.go | 5 +- .../typed/coordination/v1/fake/fake_lease.go | 59 +- .../coordination/v1beta1/fake/fake_lease.go | 5 +- .../core/v1/fake/fake_componentstatus.go | 59 +- .../typed/core/v1/fake/fake_configmap.go | 59 +- .../typed/core/v1/fake/fake_endpoints.go | 59 +- .../typed/core/v1/fake/fake_event.go | 59 +- .../typed/core/v1/fake/fake_limitrange.go | 59 +- .../typed/core/v1/fake/fake_namespace.go | 67 +- .../typed/core/v1/fake/fake_node.go | 71 +- .../core/v1/fake/fake_persistentvolume.go | 71 +- .../v1/fake/fake_persistentvolumeclaim.go | 71 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 77 +- .../typed/core/v1/fake/fake_podtemplate.go | 59 +- .../v1/fake/fake_replicationcontroller.go | 75 +- 
.../typed/core/v1/fake/fake_resourcequota.go | 71 +- .../typed/core/v1/fake/fake_secret.go | 59 +- .../typed/core/v1/fake/fake_service.go | 67 +- .../typed/core/v1/fake/fake_serviceaccount.go | 61 +- .../discovery/v1/fake/fake_endpointslice.go | 59 +- .../v1beta1/fake/fake_endpointslice.go | 5 +- .../typed/events/v1/fake/fake_event.go | 59 +- .../typed/events/v1beta1/event_expansion.go | 3 +- .../typed/events/v1beta1/fake/fake_event.go | 5 +- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/fake/fake_daemonset.go | 5 +- .../v1beta1/fake/fake_deployment.go | 5 +- .../v1beta1/fake/fake_extensions_client.go | 4 - .../extensions/v1beta1/fake/fake_ingress.go | 5 +- .../v1beta1/fake/fake_networkpolicy.go | 5 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 146 - .../v1beta1/fake/fake_replicaset.go | 5 +- .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../v1alpha1/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../v1beta1/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../v1beta2/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../v1beta3/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../typed/networking/v1/fake/fake_ingress.go | 71 +- .../networking/v1/fake/fake_ingressclass.go | 59 +- .../networking/v1/fake/fake_networkpolicy.go | 71 +- .../v1alpha1/fake/fake_clustercidr.go | 5 +- .../networking/v1beta1/fake/fake_ingress.go | 5 +- .../v1beta1/fake/fake_ingressclass.go | 5 +- .../typed/node/v1/fake/fake_runtimeclass.go | 59 +- .../node/v1alpha1/fake/fake_runtimeclass.go | 5 +- .../node/v1beta1/fake/fake_runtimeclass.go | 5 +- .../v1/fake/fake_poddisruptionbudget.go | 71 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 5 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 5 +- .../typed/rbac/v1/fake/fake_clusterrole.go | 59 +- .../rbac/v1/fake/fake_clusterrolebinding.go | 59 +- .../typed/rbac/v1/fake/fake_role.go | 59 +- .../typed/rbac/v1/fake/fake_rolebinding.go | 59 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 5 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 5 +- .../typed/rbac/v1alpha1/fake/fake_role.go | 5 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 5 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 5 +- .../v1beta1/fake/fake_clusterrolebinding.go | 5 +- .../typed/rbac/v1beta1/fake/fake_role.go | 5 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 5 +- .../v1alpha1/fake/fake_podscheduling.go | 5 +- .../v1alpha1/fake/fake_resourceclaim.go | 5 +- .../fake/fake_resourceclaimtemplate.go | 5 +- .../v1alpha1/fake/fake_resourceclass.go | 5 +- .../scheduling/v1/fake/fake_priorityclass.go | 59 +- .../v1alpha1/fake/fake_priorityclass.go | 5 +- .../v1beta1/fake/fake_priorityclass.go | 5 +- .../typed/storage/v1/fake/fake_csidriver.go | 59 +- .../typed/storage/v1/fake/fake_csinode.go | 59 +- .../v1/fake/fake_csistoragecapacity.go | 59 +- .../storage/v1/fake/fake_storageclass.go | 59 +- .../storage/v1/fake/fake_volumeattachment.go | 71 +- .../v1alpha1/fake/fake_csistoragecapacity.go | 5 +- .../v1alpha1/fake/fake_volumeattachment.go | 5 +- .../storage/v1beta1/fake/fake_csidriver.go | 5 +- .../storage/v1beta1/fake/fake_csinode.go | 5 +- .../v1beta1/fake/fake_csistoragecapacity.go | 5 +- .../storage/v1beta1/fake/fake_storageclass.go | 5 +- .../v1beta1/fake/fake_volumeattachment.go | 5 +- .../extensions/v1beta1/expansion_generated.go | 4 - .../extensions/v1beta1/podsecuritypolicy.go | 68 - 
.../vendor/k8s.io/client-go/openapi/OWNERS | 4 + .../vendor/k8s.io/client-go/rest/client.go | 3 +- .../vendor/k8s.io/client-go/rest/request.go | 35 +- .../k8s.io/client-go/rest/with_retry.go | 17 +- .../client-go/tools/cache/controller.go | 68 +- .../client-go/tools/cache/delta_fifo.go | 7 +- .../k8s.io/client-go/tools/cache/fifo.go | 14 +- .../k8s.io/client-go/tools/cache/reflector.go | 155 +- .../client-go/tools/cache/shared_informer.go | 141 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 116 + .../tools/events/event_broadcaster.go | 32 +- .../tools/leaderelection/leaderelection.go | 9 +- .../k8s.io/client-go/tools/watch/until.go | 5 +- .../util/certificate/certificate_manager.go | 119 +- .../vendor/k8s.io/cloud-provider/cloud.go | 5 + .../cloud-provider/service/helpers/helper.go | 2 +- .../component-base/logs/api/v1/options.go | 47 +- .../component-base/logs/api/v1/pflags.go | 9 + .../component-base/logs/api/v1/registry.go | 23 +- .../setverbositylevel/setverbositylevel.go | 34 + .../vendor/k8s.io/component-base/logs/logs.go | 19 +- .../k8s.io/component-base/metrics/metric.go | 5 +- .../k8s.io/component-base/metrics/registry.go | 14 + .../k8s.io/component-base/metrics/value.go | 3 +- .../cri-api/pkg/apis/runtime/v1/api.proto | 2 +- .../vendor/k8s.io/kms/apis/v1beta1/api.pb.go | 38 +- .../vendor/k8s.io/kms/apis/v1beta1/api.proto | 3 +- .../vendor/k8s.io/kms/apis/v2alpha1/api.pb.go | 53 +- .../vendor/k8s.io/kms/apis/v2alpha1/api.proto | 3 +- .../vendor/k8s.io/kms/service/grpc_service.go | 141 + .../vendor/k8s.io/kms/service/interface.go | 50 + .../k8s.io/kube-openapi/pkg/common/common.go | 48 +- .../pkg/common/restfuladapter/adapter.go | 1 - .../kube-openapi/pkg/handler/handler.go | 25 - .../kube-openapi/pkg/handler3/handler.go | 16 +- .../go-json-experiment/json/doc.go | 9 +- .../k8s.io/kube-openapi/pkg/spec3/example.go | 2 +- .../pkg/spec3/external_documentation.go | 2 +- .../kube-openapi/pkg/spec3/media_type.go | 2 +- .../kube-openapi/pkg/spec3/operation.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/path.go | 2 +- .../kube-openapi/pkg/spec3/request_body.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/response.go | 3 +- .../pkg/spec3/security_requirement.go | 2 +- .../kube-openapi/pkg/spec3/security_scheme.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 3 +- .../pkg/util/proto/document_v3.go | 4 +- .../k8s.io/kube-openapi/pkg/util/util.go | 19 +- .../kube-openapi/pkg/validation/spec/paths.go | 4 +- .../config/v1/types_pluginargs.go | 4 + .../config/v1beta2/types_pluginargs.go | 4 + .../config/v1beta3/types_pluginargs.go | 4 + .../k8s.io/kubelet/config/v1beta1/types.go | 12 + .../pkg/apis/deviceplugin/v1beta1/api.pb.go | 132 +- .../pkg/apis/deviceplugin/v1beta1/api.proto | 3 +- .../kubelet/pkg/apis/dra/v1alpha1/api.pb.go | 50 +- .../kubelet/pkg/apis/dra/v1alpha1/api.proto | 6 +- .../pkg/apis/pluginregistration/v1/api.pb.go | 506 +- .../pkg/apis/pluginregistration/v1/api.proto | 5 +- .../apis/pluginregistration/v1/constants.go | 2 +- .../pkg/apis/podresources/v1/api.pb.go | 70 +- .../pkg/apis/podresources/v1/api.proto | 3 +- .../pkg/apis/podresources/v1alpha1/api.pb.go | 73 +- .../pkg/apis/podresources/v1alpha1/api.proto | 5 +- .../cmd/kube-proxy/app/conntrack.go | 4 +- .../kubernetes/cmd/kube-proxy/app/server.go | 4 +- .../cmd/kube-proxy/app/server_others.go | 50 +- .../cmd/kube-proxy/app/server_windows.go | 2 +- .../cmd/kubelet/app/init_windows.go | 8 +- .../cmd/kubelet/app/options/options.go | 20 +- 
.../kubernetes/cmd/kubelet/app/server.go | 16 +- .../k8s.io/kubernetes/pkg/api/service/util.go | 2 +- .../kubernetes/pkg/api/service/warnings.go | 95 + .../kubernetes/pkg/api/v1/service/util.go | 2 +- .../pkg/apis/apps/validation/validation.go | 16 +- .../k8s.io/kubernetes/pkg/apis/batch/types.go | 24 +- .../k8s.io/kubernetes/pkg/apis/core/types.go | 30 +- .../kubernetes/pkg/apis/core/v1/defaults.go | 2 +- .../pkg/apis/core/v1/validation/validation.go | 6 +- .../apis/core/v1/zz_generated.conversion.go | 8 +- .../pkg/apis/core/validation/validation.go | 60 +- .../pkg/apis/core/zz_generated.deepcopy.go | 2 +- .../pkg/apis/extensions/register.go | 3 - .../kubernetes/pkg/apis/networking/types.go | 212 +- .../kubernetes/pkg/apis/scheduling/types.go | 8 +- .../pkg/controller/controller_ref_manager.go | 15 +- .../pkg/controller/controller_utils.go | 18 +- .../controller/daemon/daemon_controller.go | 115 +- .../deployment/util/deployment_util.go | 2 +- .../pkg/credentialprovider/config.go | 9 - .../kubernetes/pkg/features/kube_features.go | 117 +- .../pkg/kubelet/apis/config/types.go | 11 + .../kubelet/apis/config/v1beta1/defaults.go | 3 + .../config/v1beta1/zz_generated.conversion.go | 4 + .../apis/config/validation/validation.go | 4 + .../kubelet/apis/podresources/server_v1.go | 2 - .../pkg/kubelet/cadvisor/cadvisor_linux.go | 7 - .../certificate/bootstrap/bootstrap.go | 5 +- .../pkg/kubelet/certificate/kubelet.go | 38 +- .../pkg/kubelet/cm/cgroup_manager_linux.go | 20 +- .../pkg/kubelet/cm/container_manager.go | 2 +- .../pkg/kubelet/cm/container_manager_linux.go | 12 +- .../kubelet/cm/cpumanager/cpu_assignment.go | 28 +- .../pkg/kubelet/cm/cpumanager/policy_none.go | 2 +- .../kubelet/cm/cpumanager/policy_static.go | 24 +- .../cm/cpumanager/state/state_checkpoint.go | 4 +- .../kubelet/cm/cpumanager/state/state_mem.go | 2 +- .../cm/cpumanager/topology/topology.go | 68 +- .../pkg/kubelet/cm/cpuset/cpuset.go | 196 +- .../cm/devicemanager/plugin/v1beta1/stub.go | 4 +- .../pkg/kubelet/cm/dra/plugin/client.go | 12 +- .../pkg/kubelet/cm/helpers_linux.go | 14 +- .../memorymanager/state/state_checkpoint.go | 4 +- .../cm/node_container_manager_linux.go | 4 +- .../kubelet/cm/qos_container_manager_linux.go | 10 +- .../k8s.io/kubernetes/pkg/kubelet/cm/types.go | 6 +- .../kubernetes/pkg/kubelet/config/defaults.go | 1 + .../pkg/kubelet/cri/remote/remote_image.go | 10 +- .../pkg/kubelet/cri/remote/remote_runtime.go | 8 +- .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 33 +- .../kubernetes/pkg/kubelet/kubelet_pods.go | 5 +- .../kuberuntime/kuberuntime_container.go | 3 +- .../kuberuntime_container_windows.go | 34 +- .../pkg/kubelet/kuberuntime/legacy.go | 4 +- .../pkg/kubelet/lifecycle/predicate.go | 3 +- .../kubelet/metrics/collectors/cri_metrics.go | 2 +- .../kubernetes/pkg/kubelet/metrics/metrics.go | 2 +- .../kubernetes/pkg/kubelet/network/dns/dns.go | 2 +- .../pkg/kubelet/pluginmanager/cache/types.go | 8 +- .../example_plugin_apis/v1beta1/api.pb.go | 35 +- .../example_plugin_apis/v1beta1/api.proto | 1 + .../example_plugin_apis/v1beta2/api.pb.go | 35 +- .../example_plugin_apis/v1beta2/api.proto | 1 + .../pluginwatcher/plugin_watcher.go | 13 +- .../pluginwatcher/plugin_watcher_others.go | 34 + .../pluginwatcher/plugin_watcher_windows.go | 40 + .../pluginmanager/reconciler/reconciler.go | 13 +- .../kubernetes/pkg/kubelet/pod/pod_manager.go | 10 +- .../kubelet/stats/cadvisor_stats_provider.go | 4 +- .../pkg/kubelet/stats/cri_stats_provider.go | 4 +- .../util/pod_startup_latency_tracker.go | 2 +- 
.../desired_state_of_world_populator.go | 16 +- .../reconciler/reconciler_common.go | 26 +- .../reconciler/reconstruct_common.go | 11 +- .../kubelet/volumemanager/volume_manager.go | 38 +- .../kubelet/winstats/perfcounter_nodestats.go | 4 +- .../kubernetes/pkg/kubemark/hollow_kubelet.go | 8 +- .../kubernetes/pkg/kubemark/hollow_proxy.go | 23 +- .../kubernetes/pkg/probe/dialer_others.go | 42 + .../kubernetes/pkg/probe/dialer_windows.go | 42 + .../k8s.io/kubernetes/pkg/probe/http/http.go | 10 +- .../k8s.io/kubernetes/pkg/probe/tcp/tcp.go | 6 +- .../kubernetes/pkg/proxy/apis/config/types.go | 42 - .../apis/config/validation/validation.go | 31 - .../kubernetes/pkg/proxy/config/config.go | 129 - .../k8s.io/kubernetes/pkg/proxy/endpoints.go | 180 +- .../pkg/proxy/healthcheck/common.go | 1 + .../pkg/proxy/healthcheck/service_health.go | 13 +- .../kubernetes/pkg/proxy/iptables/proxier.go | 57 +- .../kubernetes/pkg/proxy/ipvs/proxier.go | 210 +- .../k8s.io/kubernetes/pkg/proxy/node.go | 8 +- .../k8s.io/kubernetes/pkg/proxy/service.go | 22 +- .../k8s.io/kubernetes/pkg/proxy/types.go | 2 +- .../pkg/proxy/util/nodeport_addresses.go | 116 + .../k8s.io/kubernetes/pkg/proxy/util/utils.go | 95 - .../kubernetes/pkg/proxy/winkernel/hns.go | 21 +- .../kubernetes/pkg/proxy/winkernel/proxier.go | 343 +- .../scheduler/apis/config/types_pluginargs.go | 4 + .../apis/config/v1/zz_generated.conversion.go | 2 + .../apis/config/v1beta2/default_plugins.go | 1 + .../config/v1beta2/zz_generated.conversion.go | 2 + .../config/v1beta3/zz_generated.conversion.go | 2 + .../apis/config/validation/validation.go | 8 + .../pkg/scheduler/framework/cycle_state.go | 5 + .../pkg/scheduler/framework/interface.go | 83 +- .../defaultpreemption/default_preemption.go | 38 +- .../dynamicresources/dynamicresources.go | 2 +- .../framework/plugins/feature/feature.go | 1 + .../plugins/interpodaffinity/filtering.go | 8 +- .../plugins/interpodaffinity/scoring.go | 9 + .../plugins/nodeaffinity/node_affinity.go | 18 +- .../noderesources/balanced_allocation.go | 23 +- .../framework/plugins/noderesources/fit.go | 24 +- .../plugins/noderesources/least_allocated.go | 14 +- .../plugins/noderesources/most_allocated.go | 14 +- .../requested_to_capacity_ratio.go | 17 +- .../noderesources/resource_allocation.go | 50 +- .../scheduler/framework/plugins/registry.go | 1 + .../framework/plugins/volumebinding/binder.go | 134 +- .../plugins/volumebinding/fake_binder.go | 16 +- .../plugins/volumebinding/volume_binding.go | 39 +- .../plugins/volumezone/volume_zone.go | 199 +- .../framework/preemption/preemption.go | 7 +- .../scheduler/framework/runtime/framework.go | 61 +- .../pkg/scheduler/internal/cache/cache.go | 21 +- .../kubernetes/pkg/scheduler/schedule_one.go | 184 +- .../kubernetes/pkg/scheduler/scheduler.go | 5 +- .../pkg/security/apparmor/helpers.go | 18 - .../kubernetes/pkg/securitycontext/util.go | 24 - .../util/async/bounded_frequency_runner.go | 2 +- .../pkg/util/conntrack/conntrack.go | 6 - .../kubernetes/pkg/util/iptables/iptables.go | 29 +- .../k8s.io/kubernetes/pkg/util/ipvs/ipvs.go | 29 - .../kubernetes/pkg/volume/cephfs/cephfs.go | 2 +- .../pkg/volume/configmap/configmap.go | 12 +- .../csi/nodeinfomanager/nodeinfomanager.go | 12 +- .../pkg/volume/downwardapi/downwardapi.go | 13 +- .../pkg/volume/hostpath/host_path.go | 16 +- .../kubernetes/pkg/volume/local/local.go | 2 +- .../pkg/volume/projected/projected.go | 12 +- .../kubernetes/pkg/volume/secret/secret.go | 12 +- .../pkg/volume/util/atomic_writer.go | 48 +- 
.../kubernetes/pkg/volume/util/resize_util.go | 17 - .../k8s.io/kubernetes/pkg/volume/util/util.go | 38 + .../k8s.io/kubernetes/test/utils/runners.go | 166 +- .../k8s.io/legacy-cloud-providers/aws/aws.go | 5 +- .../aws/aws_loadbalancer.go | 3 +- .../azure/azure_backoff.go | 22 +- .../azure/azure_blobDiskController.go | 6 +- .../azure/azure_controller_standard.go | 6 +- .../azure/azure_controller_vmss.go | 6 +- .../azure/azure_loadbalancer.go | 172 +- .../azure/azure_managedDiskController.go | 8 +- .../azure/azure_routes.go | 26 +- .../azure/azure_standard.go | 40 +- .../azure/azure_storageaccount.go | 6 +- .../azure/azure_utils.go | 7 +- .../azure/azure_vmss.go | 22 +- .../azure/azure_vmss_cache.go | 6 +- .../azure/azure_wrap.go | 4 +- .../azure_containerserviceclient.go | 8 +- .../azure_deploymentclient.go | 8 +- .../clients/diskclient/azure_diskclient.go | 8 +- .../azure_loadbalancerclient.go | 8 +- .../publicipclient/azure_publicipclient.go | 8 +- .../azure_securitygroupclient.go | 8 +- .../snapshotclient/azure_snapshotclient.go | 8 +- .../azure_storageaccountclient.go | 8 +- .../subnetclient/azure_subnetclient.go | 8 +- .../azure/clients/vmclient/azure_vmclient.go | 8 +- .../clients/vmssclient/azure_vmssclient.go | 8 +- .../vmssvmclient/azure_vmssvmclient.go | 8 +- .../vsphere/vclib/utils.go | 2 +- .../vendor/k8s.io/mount-utils/fake_mounter.go | 2 +- .../vendor/k8s.io/mount-utils/mount.go | 4 +- .../k8s.io/mount-utils/mount_helper_common.go | 8 +- .../vendor/k8s.io/mount-utils/mount_linux.go | 15 +- .../k8s.io/mount-utils/mount_unsupported.go | 4 +- .../k8s.io/mount-utils/mount_windows.go | 4 +- .../k8s.io/mount-utils/resizefs_linux.go | 33 +- cluster-autoscaler/vendor/modules.txt | 157 +- .../konnectivity-client/pkg/client/client.go | 165 +- .../konnectivity-client/pkg/client/conn.go | 7 +- .../pkg/client/metrics/metrics.go | 162 + .../pkg/common/metrics/metrics.go | 78 + .../proto/client/client.pb.go | 1072 +- .../proto/client/client.proto | 5 - .../proto/client/client_grpc.pb.go | 150 + cluster-autoscaler/version/version.go | 2 +- 828 files changed, 34503 insertions(+), 30548 deletions(-) create mode 100644 cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go create mode 100644 cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go delete mode 100644 cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml delete mode 100644 cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml create mode 100644 cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitattributes delete mode 100644 cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 cluster-autoscaler/vendor/github.com/go-openapi/swag/file.go delete mode 100644 cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go create mode 100644 cluster-autoscaler/vendor/github.com/google/uuid/null.go delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/LICENSE delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/NVML_NOTICE delete mode 100644 
cluster-autoscaler/vendor/github.com/mindprince/gonvml/README.md delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go delete mode 100644 cluster-autoscaler/vendor/github.com/mindprince/gonvml/nvml.h create mode 100644 cluster-autoscaler/vendor/github.com/moby/ipvs/.golangci.yml rename cluster-autoscaler/vendor/github.com/moby/ipvs/{constants.go => constants_linux.go} (93%) rename cluster-autoscaler/vendor/github.com/moby/ipvs/{ipvs.go => ipvs_linux.go} (99%) rename cluster-autoscaler/vendor/github.com/moby/ipvs/{netlink.go => netlink_linux.go} (93%) create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netns/doc.go rename cluster-autoscaler/vendor/github.com/vishvananda/netns/{netns_unspecified.go => netns_others.go} (63%) rename cluster-autoscaler/vendor/github.com/vishvananda/netns/{netns.go => nshandle_linux.go} (76%) create mode 100644 cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/history/collector.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/helpers.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/methods.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/types.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/version/version.go delete mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/object/history_collector.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/object/tenant_manager.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keepalive/handler.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/task/history_collector.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/task/manager.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/resource.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/errors.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/view/container_view.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/view/list_view.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/view/managed_object_view.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/view/manager.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/view/task_view.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/file.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/log.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go create mode 100644 cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go create mode 100644 cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 
cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/OWNERS create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/OWNERS rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/buildmanagerinfo.go (94%) rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/capmanagers.go (96%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fieldmanager.go create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastapplied.go rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/lastappliedmanager.go (97%) rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/lastappliedupdater.go (78%) rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/managedfieldsupdater.go (95%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/manager.go rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/skipnonapplied.go (99%) rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/stripmeta.go (99%) rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/structuredmerge.go (97%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go rename cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{ => internal}/versionconverter.go (87%) create mode 100644 cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/debugsocket.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 
cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/doc.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 cluster-autoscaler/vendor/k8s.io/component-base/logs/internal/setverbositylevel/setverbositylevel.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kms/service/grpc_service.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kms/service/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_others.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_windows.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics/metrics.go create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics/metrics.go create mode 100644 
cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index c63f28750925..8b5119ceaf68 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -11,7 +11,7 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/Azure/go-autorest/autorest/to v0.4.0 github.com/Azure/skewer v0.0.14 - github.com/aws/aws-sdk-go v1.44.116 + github.com/aws/aws-sdk-go v1.44.147 github.com/digitalocean/godo v1.27.0 github.com/ghodss/yaml v1.0.0 github.com/gofrs/uuid v4.0.0+incompatible @@ -19,31 +19,31 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 github.com/google/go-querystring v1.0.0 - github.com/google/uuid v1.1.2 + github.com/google/uuid v1.3.0 github.com/jmespath/go-jmespath v0.4.0 github.com/json-iterator/go v1.1.12 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/satori/go.uuid v1.2.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 golang.org/x/crypto v0.1.0 - golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 + golang.org/x/net v0.4.0 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b google.golang.org/api v0.60.0 - google.golang.org/grpc v1.49.0 + google.golang.org/grpc v1.51.0 google.golang.org/protobuf v1.28.1 gopkg.in/gcfg.v1 v1.2.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.0 - k8s.io/apimachinery v0.26.0 - k8s.io/apiserver v0.26.0 - k8s.io/client-go v0.26.0 - k8s.io/cloud-provider v0.26.0 - k8s.io/component-base v0.26.0 - k8s.io/component-helpers v0.26.0 + k8s.io/api v0.27.0-alpha.1 + k8s.io/apimachinery v0.27.0-alpha.1 + k8s.io/apiserver v0.27.0-alpha.1 + k8s.io/client-go v0.27.0-alpha.1 + k8s.io/cloud-provider v0.27.0-alpha.1 + k8s.io/component-base v0.27.0-alpha.1 + k8s.io/component-helpers v0.27.0-alpha.1 k8s.io/klog/v2 v2.80.1 - k8s.io/kubernetes v1.26.0 + k8s.io/kubernetes v1.27.0-alpha.1 k8s.io/legacy-cloud-providers v0.0.0 k8s.io/utils v0.0.0-20221107191617-1a15be271d1d sigs.k8s.io/cloud-provider-azure v1.24.2 @@ -59,7 +59,7 @@ require ( github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect github.com/Microsoft/go-winio v0.4.17 // indirect - github.com/Microsoft/hcsshim v0.8.22 // indirect + github.com/Microsoft/hcsshim v0.8.25 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect @@ -87,15 +87,15 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/golang-jwt/jwt/v4 
v4.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/cadvisor v0.46.0 // indirect - github.com/google/cel-go v0.12.5 // indirect + github.com/google/cadvisor v0.47.1 // indirect + github.com/google/cel-go v0.12.6 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/googleapis/gax-go/v2 v2.1.1 // indirect @@ -107,12 +107,11 @@ require ( github.com/karrick/godirwalk v1.17.0 // indirect github.com/libopenstorage/openstorage v1.0.0 // indirect github.com/lithammer/dedent v1.1.0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect - github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/moby/ipvs v1.0.1 // indirect + github.com/moby/ipvs v1.1.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -123,7 +122,7 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/runc v1.1.4 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect github.com/opencontainers/selinux v1.10.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -131,14 +130,14 @@ require ( github.com/prometheus/procfs v0.8.0 // indirect github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cobra v1.6.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect - github.com/vmware/govmomi v0.20.3 // indirect + github.com/vishvananda/netns v0.0.2 // indirect + github.com/vmware/govmomi v0.30.0 // indirect go.etcd.io/etcd/api/v3 v3.5.5 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect go.etcd.io/etcd/client/v3 v3.5.5 // indirect @@ -157,7 +156,7 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/term v0.3.0 // indirect 
golang.org/x/text v0.5.0 // indirect @@ -169,16 +168,16 @@ require ( gopkg.in/warnings.v0 v0.1.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cri-api v0.0.0 // indirect - k8s.io/csi-translation-lib v0.26.0 // indirect + k8s.io/csi-translation-lib v0.27.0-alpha.1 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/kms v0.26.0 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/kms v0.27.0-alpha.1 // indirect + k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596 // indirect k8s.io/kube-proxy v0.0.0 // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.26.0 // indirect + k8s.io/kubelet v0.27.0-alpha.1 // indirect k8s.io/mount-utils v0.26.0-alpha.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect @@ -190,60 +189,62 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace k8s.io/api => k8s.io/api v0.26.0 +replace k8s.io/api => k8s.io/api v0.27.0-alpha.1 -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.0 +replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0-alpha.1 -replace k8s.io/apimachinery => k8s.io/apimachinery v0.26.1-rc.0 +replace k8s.io/apimachinery => k8s.io/apimachinery v0.27.0-alpha.1 -replace k8s.io/apiserver => k8s.io/apiserver v0.26.0 +replace k8s.io/apiserver => k8s.io/apiserver v0.27.0-alpha.1 -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.0 +replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0-alpha.1 -replace k8s.io/client-go => k8s.io/client-go v0.26.0 +replace k8s.io/client-go => k8s.io/client-go v0.27.0-alpha.1 -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.0 +replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0-alpha.1 -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.0 +replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0-alpha.1 -replace k8s.io/code-generator => k8s.io/code-generator v0.26.1-rc.0 +replace k8s.io/code-generator => k8s.io/code-generator v0.27.0-alpha.1 -replace k8s.io/component-base => k8s.io/component-base v0.26.0 +replace k8s.io/component-base => k8s.io/component-base v0.27.0-alpha.1 -replace k8s.io/component-helpers => k8s.io/component-helpers v0.26.0 +replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.0-alpha.1 -replace k8s.io/controller-manager => k8s.io/controller-manager v0.26.0 +replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.0-alpha.1 -replace k8s.io/cri-api => k8s.io/cri-api v0.26.1-rc.0 +replace k8s.io/cri-api => k8s.io/cri-api v0.27.0-alpha.1 -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.0 +replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0-alpha.1 -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.0 +replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0-alpha.1 -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.0 +replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0-alpha.1 -replace k8s.io/kube-proxy => k8s.io/kube-proxy 
v0.26.0 +replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0-alpha.1 -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.0 +replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0-alpha.1 -replace k8s.io/kubectl => k8s.io/kubectl v0.26.0 +replace k8s.io/kubectl => k8s.io/kubectl v0.27.0-alpha.1 -replace k8s.io/kubelet => k8s.io/kubelet v0.26.0 +replace k8s.io/kubelet => k8s.io/kubelet v0.27.0-alpha.1 -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.0 +replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0-alpha.1 -replace k8s.io/metrics => k8s.io/metrics v0.26.0 +replace k8s.io/metrics => k8s.io/metrics v0.27.0-alpha.1 -replace k8s.io/mount-utils => k8s.io/mount-utils v0.26.1-rc.0 +replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.0-alpha.1 -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.0 +replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0-alpha.1 -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.0 +replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0-alpha.1 -replace k8s.io/sample-controller => k8s.io/sample-controller v0.26.0 +replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.0-alpha.1 -replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.0 +replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0-alpha.1 -replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.0 +replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0-alpha.1 -replace k8s.io/kms => k8s.io/kms v0.26.1-rc.0 +replace k8s.io/kms => k8s.io/kms v0.27.0-alpha.1 + +replace k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0-alpha.1 diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index e0faca473abc..d6f485c14b3a 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -94,8 +94,8 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5 github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.8.22 h1:CulZ3GW8sNJExknToo+RWD+U+6ZM5kkNfuxywSDPd08= -github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= +github.com/Microsoft/hcsshim v0.8.25 h1:fRMwXiwk3qDwc0P05eHnh+y2v07JdtsfQ1fuAc69m9g= +github.com/Microsoft/hcsshim v0.8.25/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -112,8 +112,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= 
-github.com/aws/aws-sdk-go v1.44.116 h1:NpLIhcvLWXJZAEwvPj3TDHeqp7DleK6ZUVYyW01WNHY= -github.com/aws/aws-sdk-go v1.44.116/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.147 h1:C/YQv0QAvRHio4cESBTFGh8aI/JM9VdRislDIOz/Dx4= +github.com/aws/aws-sdk-go v1.44.147/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -124,6 +124,7 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -161,7 +162,6 @@ github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= @@ -180,7 +180,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -196,8 +196,8 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution 
v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= -github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= +github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -206,7 +206,6 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -252,14 +251,13 @@ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -270,7 +268,6 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -317,10 +314,10 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/cadvisor v0.46.0 h1:ryTIniqhN8/wR8UA1RuYSXHvsAtdpk/01XwTZtYHekY= -github.com/google/cadvisor v0.46.0/go.mod h1:YnCDnR8amaS0HoMEjheOI0TMPzFKCBLc30mciLEjwGI= -github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8= -github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/google/cadvisor v0.47.1 h1:YyKnRy/3myRNGOvF1bNF9FFnpjY7Gky5yKi/ZlN+BSo= +github.com/google/cadvisor v0.47.1/go.mod h1:iJdTjcjyKHjLCf7OSTzwP5GxdfrkPusw2x5bwGvuLUw= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -362,8 +359,9 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -412,17 +410,15 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -432,28 +428,24 @@ github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wn github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs= -github.com/mindprince/gonvml 
v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= +github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -472,12 +464,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -486,8 +476,9 @@ github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R5M2qXZiK/mWPMT4VldCOiSL9HIAMuxQZWdG0CSM5+4= +github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -504,7 +495,6 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -523,7 +513,6 @@ github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -534,6 +523,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -548,8 +538,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -568,8 +559,9 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -577,22 +569,23 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 
h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= -github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI= +github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= +github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= +github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -601,6 +594,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= @@ -706,6 +700,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -750,8 +745,10 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -784,8 +781,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -808,11 +806,9 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -856,11 +852,16 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -872,6 +873,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -881,7 +883,6 @@ golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0k golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -936,6 +937,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -992,7 +994,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1070,8 +1071,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1093,8 +1094,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -1121,7 +1122,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1130,55 +1130,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/apimachinery v0.26.1-rc.0 h1:3eoGy0bC3cheClZ1RfKsZRsHdTOjAM//aqhxVlSLxKE= -k8s.io/apimachinery v0.26.1-rc.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apiserver v0.26.0 h1:q+LqIK5EZwdznGZb8bq0+a+vCqdeEEe4Ux3zsOjbc4o= -k8s.io/apiserver v0.26.0/go.mod h1:aWhlLD+mU+xRo+zhkvP/gFNbShI4wBDHS33o0+JGI84= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/cloud-provider v0.26.0 h1:kO2BIgCou71QNRHGkpFi/8lnas9UIr+fJz1l/nuiOMo= -k8s.io/cloud-provider v0.26.0/go.mod h1:JwfUAH67C8f7t6tOC4v4ty+DuvIYVjNF6bGVYSDCqqs= -k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs= -k8s.io/component-base v0.26.0/go.mod h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8= -k8s.io/component-helpers v0.26.0 h1:KNgwqs3EUdK0HLfW4GhnbD+q/Zl9U021VfIU7qoVYFk= -k8s.io/component-helpers v0.26.0/go.mod h1:jHN01qS/Jdj95WCbTe9S2VZ9yxpxXNY488WjF+yW4fo= -k8s.io/cri-api v0.26.1-rc.0 h1:mq430QOK1tA1/tN24/AZq50b3SuS5ngnYUph5WCzDS0= -k8s.io/cri-api v0.26.1-rc.0/go.mod h1:I5TGOn/ziMzqIcUvsYZzVE8xDAB1JBkvcwvR0yDreuw= -k8s.io/csi-translation-lib v0.26.0 h1:bCvlfw53Kmyn7cvXeYGe9aqqzR1b0xrGs2XEWHFW+es= -k8s.io/csi-translation-lib v0.26.0/go.mod h1:zRKLRqER6rA8NCKQBhVIdkyDHKgNlu2BK1RKTHjcw+8= -k8s.io/dynamic-resource-allocation v0.26.0 h1:zljrsqa0PxrIwNklTnGBA/az6+33SUQwsNNNKdVTzwg= -k8s.io/dynamic-resource-allocation v0.26.0/go.mod h1:K+hO5A+QsSknRjlhfbUtvZVYUblOldvYyT51eGrZyWI= +k8s.io/api v0.27.0-alpha.1 h1:4L3MEcje+LTMfkZrRaYOzA5a0MTvv3uqonblgQZ39E8= +k8s.io/api v0.27.0-alpha.1/go.mod h1:pUUR9UVsje2ip8mF3GilkKhUt21HVrMB0x0Mzta5HQw= +k8s.io/apimachinery v0.27.0-alpha.1 h1:q8VsOXO0tQsvrz6QSvolMvlciOFCRpCgQBD1lw/MHSo= +k8s.io/apimachinery v0.27.0-alpha.1/go.mod h1:mb1AP2xs7Ajs+OvXRynwIgKVID9/rOtI7lFxIixvFp0= +k8s.io/apiserver v0.27.0-alpha.1 h1:2bMQrLAKBkn+SSlB5zWjYPBuebh8/oi5aItXPOtNUdU= +k8s.io/apiserver v0.27.0-alpha.1/go.mod 
h1:yUdhHGNuvU2CpeigdIu+cKa3wVM93+aZlZU8MhbXH7E= +k8s.io/client-go v0.27.0-alpha.1 h1:GLfzvPrPjCk/WlcaX1JzpSYEtN1p+j6QgMNoQsj5JIQ= +k8s.io/client-go v0.27.0-alpha.1/go.mod h1:XGeHlRblXNGUEWfmNxWH66XcNFk1RvwgarhDbUNtpeE= +k8s.io/cloud-provider v0.27.0-alpha.1 h1:2p28CtLVr41pz6/Il3BnNXuKCeCyeq+itTQBYXBx6Zg= +k8s.io/cloud-provider v0.27.0-alpha.1/go.mod h1:7u1gjRbf9VDV73sDNqMUi56ipifmyc9USPzZXuOHL9s= +k8s.io/component-base v0.27.0-alpha.1 h1:+5F9ilxocwZsj+Z8mS6nm64ZYQy6dRB+aYk6D1ApRjU= +k8s.io/component-base v0.27.0-alpha.1/go.mod h1:5R5aAxOBbwPiDQrgRfV3nQHa+lhOXCUsiXOVWubXKio= +k8s.io/component-helpers v0.27.0-alpha.1 h1:ben8y2mRXBx6tls8V8PF/MK8GkS6QdFnDjOGZ/KsIPY= +k8s.io/component-helpers v0.27.0-alpha.1/go.mod h1:AULEoyFqwh3THhMUjrDtWRGqaxx0OpH6s5IfdmU+E6g= +k8s.io/cri-api v0.27.0-alpha.1 h1:4FujdgMZWoa0KkxipDAJnj564Z+tr1BATHxHZ6MQ6Hk= +k8s.io/cri-api v0.27.0-alpha.1/go.mod h1:4wcbqN7evDJ7BYMZl9iOVTdh/wtvOc2bqEpBwCk5quY= +k8s.io/csi-translation-lib v0.27.0-alpha.1 h1:/joQUMhvB8JlZSOKfDcmR5JEaI8HAKXq+z5FHSXnLRg= +k8s.io/csi-translation-lib v0.27.0-alpha.1/go.mod h1:JcaGK9EA3iU4ZarfNnPVSSlL+ev4S2xveIhrfY+NjxI= +k8s.io/dynamic-resource-allocation v0.27.0-alpha.1 h1:Ah1EnqYEml3MsOjjASgLY591YuNT9+mxkGHqIPbP8YI= +k8s.io/dynamic-resource-allocation v0.27.0-alpha.1/go.mod h1:TcVld/WY7j1FroVM6BkBDuhcSRHbXAZe2dJBTrmEK1c= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.26.1-rc.0 h1:JB4/QyqY/dBX991K1FxGDIzhmG7g4Pw3FEBmzweDPG8= -k8s.io/kms v0.26.1-rc.0/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-proxy v0.26.0 h1:VBC83bWr5L4GKSxRFz0YBbwGgQITc0+p8avGzw0LNKo= -k8s.io/kube-proxy v0.26.0/go.mod h1:4kz3dPdMUnspJnFgoJG9lWn1UCiho85Gyn1WLInK0XA= -k8s.io/kube-scheduler v0.26.0 h1:PjSF4cF9X7cAMj5MZ9ZSq2RJ2VkcKKCKj6fy/EbxtA0= -k8s.io/kube-scheduler v0.26.0/go.mod h1:FmptJbq36ATKYxeR+UqAvUtFaLeoFWgoDk1cdCpVPYQ= -k8s.io/kubectl v0.26.0 h1:xmrzoKR9CyNdzxBmXV7jW9Ln8WMrwRK6hGbbf69o4T0= -k8s.io/kubectl v0.26.0/go.mod h1:eInP0b+U9XUJWSYeU9XZnTA+cVYuWyl3iYPGtru0qhQ= -k8s.io/kubelet v0.26.0 h1:08bDb5IoUH/1K1t2NUwnGIIWxjm9LSqn6k3FWw1tJGI= -k8s.io/kubelet v0.26.0/go.mod h1:DluF+d8jS2nE/Hs7CC3QM+OZlIEb22NTOihQ3EDwCQ4= -k8s.io/kubernetes v1.26.0 h1:fL8VMr4xlfTazPORLhz5fsvO5I3bsFpmynVxZTH1ItQ= -k8s.io/kubernetes v1.26.0/go.mod h1:z0aCJwn6DxzB/dDiWLbQaJO5jWOR2qoaCMnmSAx45XM= -k8s.io/legacy-cloud-providers v0.26.0 h1:PudIbGlvQLwSkden/vSfRE4YtsyKCRjQs0OAiP1w8M8= -k8s.io/legacy-cloud-providers v0.26.0/go.mod h1:dOOgYhHiMNWNla/XyM4Ppgjcrn3HulGa93/0vUO5/z4= -k8s.io/mount-utils v0.26.1-rc.0 h1:v7GKm3S5IdmcZvd7gM0QtANdVJRIPpycvgiT/o9y85I= -k8s.io/mount-utils v0.26.1-rc.0/go.mod h1:au99w4FWU5ZWelLb3Yx6kJc8RZ387IyWVM9tN65Yhxo= +k8s.io/kms v0.27.0-alpha.1 h1:BHLcJkYr4X05hWlolKoTT4MvBJOFXciVP3xcMQ/TlXE= +k8s.io/kms v0.27.0-alpha.1/go.mod h1:YD/RpQRK1iq1ur5ndhskkdKPgHqY0wfCDfEir3lxiFg= +k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596 h1:8cNCQs+WqqnSpZ7y0LMQPKD+RZUHU17VqLPMW3qxnxc= +k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= +k8s.io/kube-proxy v0.27.0-alpha.1 h1:qQ/YbTp7k/KxuAv95K/Yq1Ubp0+tW3iBRx8j63IxMgE= +k8s.io/kube-proxy 
v0.27.0-alpha.1/go.mod h1:CiQM8Qy1YEbiDgBREpV9GDmenP9X6ehe73KrvVYvrBQ= +k8s.io/kube-scheduler v0.27.0-alpha.1 h1:8raWcfrlDrKx983wqsdymE2ISlMNbBMPWXZXHA3TJII= +k8s.io/kube-scheduler v0.27.0-alpha.1/go.mod h1:MIZFneWs5tkB2Sn64Aw6NbzmpuwzseCb9BPa7GlnVLc= +k8s.io/kubectl v0.27.0-alpha.1 h1:ukrFyro2+pT+ElywCU8BkALLiTCl3mfQk8fnNqKKlzg= +k8s.io/kubectl v0.27.0-alpha.1/go.mod h1:jCBlSazqrwr8cAlPmXFDg6MjprKyb01L/gFb6frelvE= +k8s.io/kubelet v0.27.0-alpha.1 h1:areTiWI1hMKxRAXfuJvCQndu9sSqx0H/ooCj0nUzRhg= +k8s.io/kubelet v0.27.0-alpha.1/go.mod h1:ZeUHblI2ZNrQIZFPu8FK9NSQFG/i4c9uSi7qbpgbkqo= +k8s.io/kubernetes v1.27.0-alpha.1 h1:O9/j89TAORHqEyfhAEz3RFuVxMe6i3dcohqi40xg5HE= +k8s.io/kubernetes v1.27.0-alpha.1/go.mod h1:YakiZaor8p/y+3HitKFdE0FyRmjBfQbAjdpYWBgssZ4= +k8s.io/legacy-cloud-providers v0.27.0-alpha.1 h1:AtjVFtDFlwj/Cn23YQsoAecQlnnTb2lfBVJmsBAQjy0= +k8s.io/legacy-cloud-providers v0.27.0-alpha.1/go.mod h1:5jGVAF1MlPe/iRLYqtnkLIxK2MqqqUBQTcFlcZYEAY0= +k8s.io/mount-utils v0.27.0-alpha.1 h1:YDIpnmJxeossNar1BFyb/bdmEtpeC2mp/x3nz66oRc8= +k8s.io/mount-utils v0.27.0-alpha.1/go.mod h1:p03H0tBxdCqsrjayG3G6fq9whrWOHeFgoUkOI1F7VcQ= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4= sigs.k8s.io/cloud-provider-azure v1.24.2 h1:t0c3Q7GAGQ0oqyl/KiHLtkS4obEYJpAMRYUuhEtgs/k= sigs.k8s.io/cloud-provider-azure v1.24.2/go.mod h1:uKqonMQbC2zqwq7NIWOfQLgrsMzD02Wj5UFFl1te1GY= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go index 27a62a723861..f46af33bb650 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -86,6 +86,12 @@ type Container interface { // container to be terminated by some error condition (including calling // Close). Wait() error + // WaitChannel returns the wait channel of the container + WaitChannel() <-chan struct{} + // WaitError returns the container termination error. + // This function should only be called after the channel in WaitChannel() + // is closed. Otherwise it is not thread safe. 
+ WaitError() error // Modify sends a request to modify container resources Modify(ctx context.Context, config interface{}) error } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 75499c967f0f..b478931c3259 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -275,11 +275,19 @@ func (computeSystem *System) waitBackground() { oc.SetSpanStatus(span, err) } +func (computeSystem *System) WaitChannel() <-chan struct{} { + return computeSystem.waitBlock +} + +func (computeSystem *System) WaitError() error { + return computeSystem.waitError +} + // Wait synchronously waits for the compute system to shutdown or terminate. If // the compute system has already exited returns the previous error (if any). func (computeSystem *System) Wait() error { - <-computeSystem.waitBlock - return computeSystem.waitError + <-computeSystem.WaitChannel() + return computeSystem.WaitError() } // ExitError returns an error describing the reason the compute system terminated. diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go index db59567d0895..859b753c246d 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { return } +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string type UnicodeString struct { Length uint16 MaximumLength uint16 Buffer *uint16 } +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + //String converts a UnicodeString to a golang string func (uni UnicodeString) String() string { // UnicodeString is not guaranteed to be null terminated, therefore // use the UnicodeString's Length field - return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) } // NewUnicodeString allocates a new UnicodeString and copies `s` into // the buffer of the new UnicodeString. func NewUnicodeString(s string) (*UnicodeString, error) { - // Get length of original `s` to use in the UnicodeString since the `buf` - // created later will have an additional trailing null character - length := len(s) - if length > 32767 { - return nil, syscall.ENAMETOOLONG - } - buf, err := windows.UTF16FromString(s) if err != nil { return nil, err } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + uni := &UnicodeString{ - Length: uint16(length * 2), - MaximumLength: uint16(length * 2), + // The length is in bytes and should not include the trailing null character. 
+ Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), Buffer: &buf[0], } return uni, nil diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index d9aa3314a48e..2fefde190869 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -23,13 +23,16 @@ const ( ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). @@ -157,6 +160,9 @@ var awsPartition = partition{ "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, "ap-southeast-1": region{ Description: "Asia Pacific (Singapore)", }, @@ -172,12 +178,18 @@ var awsPartition = partition{ "eu-central-1": region{ Description: "Europe (Frankfurt)", }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, "eu-north-1": region{ Description: "Europe (Stockholm)", }, "eu-south-1": region{ Description: "Europe (Milan)", }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, "eu-west-1": region{ Description: "Europe (Ireland)", }, @@ -237,6 +249,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -258,12 +273,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -399,6 +420,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -429,12 +453,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -1084,6 +1114,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -1196,6 +1234,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -1212,6 +1258,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1593,6 +1647,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1601,6 +1663,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -1960,6 +2030,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1981,12 +2054,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2142,6 +2221,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2157,12 +2239,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2339,6 +2427,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2354,12 +2445,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3026,56 +3123,183 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3115,12 +3339,30 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -3130,6 +3372,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -3139,6 +3387,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -3148,6 +3402,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -3221,6 +3481,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3236,12 +3499,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3910,6 +4179,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3931,12 +4203,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3991,6 +4269,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4086,6 +4367,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4101,12 +4385,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4355,6 +4645,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4370,12 +4663,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4812,6 +5111,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4827,12 +5129,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5849,6 +6157,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5864,12 +6175,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6283,6 +6600,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6769,6 +7089,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6985,6 +7308,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7000,12 +7326,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7213,6 +7545,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7255,12 +7590,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7640,6 +7981,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7709,6 +8053,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7739,12 +8086,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: 
"eu-west-1", }: endpoint{}, @@ -7947,6 +8300,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8022,6 +8378,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8043,12 +8402,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8200,6 +8565,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8215,12 +8583,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8432,6 +8806,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8496,6 +8873,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8511,12 +8891,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9038,6 +9424,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -9092,6 +9487,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9173,6 +9577,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9188,12 +9595,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9312,6 +9725,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + 
Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9335,12 +9751,18 @@ var awsPartition = partition{ }: endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9496,6 +9918,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9855,6 +10280,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9870,12 +10298,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9997,6 +10431,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10012,12 +10449,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10283,6 +10726,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10866,15 +11312,33 @@ var awsPartition = partition{ }, "fsx": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10887,12 +11351,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -10929,6 +11402,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-west-2", }: endpoint{ @@ -10956,6 +11438,15 @@ var awsPartition = partition{ }, 
Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -10965,6 +11456,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "prod-ca-central-1", }: endpoint{ @@ -11019,6 +11513,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-west-2", }: endpoint{ @@ -11037,6 +11549,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11055,6 +11570,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11135,6 +11659,9 @@ var awsPartition = partition{ }, "gamesparks": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11561,6 +12088,22 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "dataplane-ap-south-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "dataplane-us-east-2", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12041,6 +12584,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "ingest-fips-us-east-2", @@ -12049,6 +12593,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "ingest-fips-us-west-2", @@ -12057,6 +12602,61 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-east-1", @@ -12326,6 +12926,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12680,6 +13283,16 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "iotroborunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -13275,6 +13888,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13290,12 +13906,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13512,6 +14134,15 @@ var awsPartition = partition{ }, "kms": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "af-south-1", }: endpoint{}, @@ -13620,6 +14251,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13710,6 +14359,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13746,6 +14413,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-south-2-fips", }: endpoint{ @@ -14120,6 +14796,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, 
+ }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14165,6 +14850,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14183,6 +14877,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14249,6 +14952,12 @@ var awsPartition = partition{ endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14652,6 +15361,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14667,12 +15379,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14836,6 +15554,15 @@ var awsPartition = partition{ }, "m2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -14848,6 +15575,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -15585,12 +16318,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips", }: endpoint{ @@ -15668,6 +16407,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15683,12 +16425,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16008,6 +16756,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -16023,12 +16774,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16590,6 +17347,88 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "oidc": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17124,6 +17963,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17139,12 +17981,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17203,7 +18051,21 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17213,6 +18075,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17634,9 +18505,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -17892,6 +18781,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17977,12 +18869,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18104,6 +19002,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18134,12 +19035,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18496,6 +19403,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18517,12 +19427,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18978,6 +19894,124 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-2.api.aws", + }, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-3.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-2.api.aws", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -19064,6 +20098,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19695,6 +20732,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -19769,6 +20815,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -19787,6 +20842,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -20532,6 +21596,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -20637,6 +21704,37 @@ var awsPartition = partition{ }, }, }, + "scheduler": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + }: endpoint{}, + }, + }, "schemas": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20748,6 +21846,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20778,12 +21879,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22240,6 +23347,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22255,12 +23365,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22379,6 +23495,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22394,12 +23513,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22515,6 +23640,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22536,12 +23664,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22776,6 +23910,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22791,12 +23928,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22963,6 +24106,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -23071,6 +24217,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23086,12 +24235,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23154,6 +24309,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23177,12 +24335,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23321,6 +24485,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23336,12 +24503,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23454,6 +24627,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23469,12 +24645,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23587,6 +24769,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23602,12 +24787,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24076,6 +25267,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24294,6 +25488,67 @@ var awsPartition = partition{ }, }, }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "voiceid": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24305,22 +25560,46 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ - + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, Deprecated: boxedTrue, }, endpointKey{ @@ -24329,14 +25608,18 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, }, }, "waf": service{ @@ -25634,6 +26917,24 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -25847,6 +27148,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25862,12 +27166,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -26784,6 +28094,14 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "dataplane-cn-north-1", + }: endpoint{ + Hostname: "greengrass.ats.iot.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, }, }, "guardduty": service{ @@ -27133,6 +28451,29 @@ 
var awscnPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28863,6 +30204,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "data-ats.iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -30155,6 +31506,13 @@ var awsusgovPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31290,6 +32648,29 @@ var awsusgovPartition = partition{ }, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-west-1.api.aws", + }, + }, + }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -33126,6 +34507,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dynamodb": service{ @@ -33200,6 +34584,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -33209,6 +34602,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "elasticloadbalancing": service{ @@ -33260,6 +34662,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "glacier": service{ @@ -33531,6 +34936,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "states": service{ diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go index fc2231d93ae1..2417675c264d 100644 --- 
a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.116" +const SDKVersion = "1.44.147" diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go index ebcbc2b40a3f..34fea49ca819 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -1,9 +1,8 @@ package shareddefaults import ( - "os" + "os/user" "path/filepath" - "runtime" ) // SharedCredentialsFilename returns the SDK's default file path @@ -31,10 +30,17 @@ func SharedConfigFilename() string { // UserHomeDir returns the home directory for the user the process is // running under. func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir } - // *nix - return os.Getenv("HOME") + return home } diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 000000000000..eb298ae0fc1a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 000000000000..51541b508766 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 2aec80661a43..12e814ddf256 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -4,7 +4,6 @@ package jsonutil import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "math" "reflect" @@ -16,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + var timeType = reflect.ValueOf(time.Time{}).Type() var 
byteSliceType = reflect.ValueOf([]byte{}).Type() @@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) case reflect.Float64: f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: switch converted := value.Interface().(type) { case time.Time: diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index 8b2c9bbeba0c..f9334879b806 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math" "math/big" "reflect" "strings" @@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag return err } value.Set(reflect.ValueOf(v)) + case *float64: + // These are regular strings when parsed by encoding/json's unmarshaler. + switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 75866d012184..058334053c24 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -3,6 +3,7 @@ package queryutil import ( "encoding/base64" "fmt" + "math" "net/url" "reflect" "sort" @@ -13,6 +14,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Parse parses an object i and fills a url.Values object. The isEC2 flag // indicates if this is the EC2 Query sub-protocol. 
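The hunks above (and the matching ones for the REST and XML marshalers further below) teach the SDK's protocol code to serialize non-finite floats as the sentinel strings "NaN", "Infinity", and "-Infinity", and to map those strings back to math.NaN and math.Inf when unmarshaling. A minimal standalone sketch of that round trip, using only the standard library; the function names here are illustrative, not SDK APIs:

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

const (
	floatNaN    = "NaN"
	floatInf    = "Infinity"
	floatNegInf = "-Infinity"
)

// encodeFloat renders a float64 the way the updated marshalers do:
// finite values go through strconv, non-finite values become sentinel strings.
func encodeFloat(f float64) string {
	switch {
	case math.IsNaN(f):
		return floatNaN
	case math.IsInf(f, 1):
		return floatInf
	case math.IsInf(f, -1):
		return floatNegInf
	default:
		return strconv.FormatFloat(f, 'f', -1, 64)
	}
}

// decodeFloat is the inverse: sentinel strings map back to NaN/±Inf,
// anything else is parsed as a regular decimal number.
func decodeFloat(s string) (float64, error) {
	switch {
	case strings.EqualFold(s, floatNaN):
		return math.NaN(), nil
	case strings.EqualFold(s, floatInf):
		return math.Inf(1), nil
	case strings.EqualFold(s, floatNegInf):
		return math.Inf(-1), nil
	default:
		return strconv.ParseFloat(s, 64)
	}
}

func main() {
	for _, f := range []float64{1.5, math.NaN(), math.Inf(1), math.Inf(-1)} {
		s := encodeFloat(f)
		back, _ := decodeFloat(s)
		fmt.Printf("%v -> %q -> %v\n", f, s, back)
	}
}

Decoding compares case-insensitively, mirroring the strings.EqualFold checks added in the unmarshal hunks.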
func Parse(body url.Values, i interface{}, isEC2 bool) error { @@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta case int: v.Set(name, strconv.Itoa(value)) case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" format := tag.Get("timestampFormat") diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 63f66af2c62f..1d273ff0ec6e 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "net/http" "net/url" "path" @@ -20,6 +21,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -302,7 +309,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) case int64: str = strconv.FormatInt(value, 10) case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index cdef403e219c..79fcf1699b78 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "reflect" "strconv" @@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&i)) case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } } v.Set(reflect.ValueOf(&f)) case *time.Time: diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go 
b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 2fbb93ae76ad..58c12bd8ccbc 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "encoding/xml" "fmt" + "math" "reflect" "sort" "strconv" @@ -14,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // BuildXML will serialize params into an xml.Encoder. Error will be returned // if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { @@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect // Error will be returned if the value type is unsupported. func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { var str string + switch converted := value.Interface().(type) { case string: str = converted @@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case int: str = strconv.Itoa(converted) case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently. 
+ asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 107c053f8acf..44a580a940b9 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/xml" "fmt" "io" + "math" "reflect" "strconv" "strings" @@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err + var v float64 + switch { + case strings.EqualFold(node.Text, floatNaN): + v = math.NaN() + case strings.EqualFold(node.Text, floatInf): + v = math.Inf(1) + case strings.EqualFold(node.Text, floatNegInf): + v = math.Inf(-1) + default: + var err error + v, err = strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } } r.Set(reflect.ValueOf(&v)) case *time.Time: diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index 380b62c0abc7..33c06a6589ed 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -859,6 +859,13 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html) // in the Amazon EC2 Auto Scaling User Guide. // +// Amazon EC2 Auto Scaling configures instances launched as part of an Auto +// Scaling group using either a launch template or a launch configuration. We +// strongly recommend that you do not use launch configurations. They do not +// provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For +// information about using launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html) +// in the Amazon EC2 Auto Scaling User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1051,10 +1058,9 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou // Deletes the specified Auto Scaling group. // // If the group has instances or scaling activities in progress, you must specify -// the option to force the deletion in order for it to succeed. -// -// If the group has policies, deleting the group deletes the policies, the underlying -// alarm actions, and any alarm that no longer has an associated action. +// the option to force the deletion in order for it to succeed. The force delete +// operation will also terminate the EC2 instances. 
If the group has a warm +// pool, the force delete option also deletes the warm pool. // // To remove instances from the Auto Scaling group before deleting it, call // the DetachInstances API with the list of instances and the option to decrement @@ -1065,6 +1071,13 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou // UpdateAutoScalingGroup API and set the minimum size and desired capacity // of the Auto Scaling group to zero. // +// If the group has scaling policies, deleting the group deletes the policies, +// the underlying alarm actions, and any alarm that no longer has an associated +// action. +// +// For more information, see Delete your Auto Scaling infrastructure (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-process-shutdown.html) +// in the Amazon EC2 Auto Scaling User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7327,6 +7340,11 @@ type CreateAutoScalingGroupInput struct { // The name of the Auto Scaling group. This name must be unique per Region per // account. // + // The name can contain any ASCII character 33 to 126 including most punctuation + // characters, digits, and upper and lowercased letters. + // + // You cannot use a colon (:) in the name. + // // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` @@ -7405,7 +7423,7 @@ type CreateAutoScalingGroupInput struct { // and marking it unhealthy due to a failed Elastic Load Balancing or custom // health check. This is useful if your instances do not immediately pass these // health checks after they enter the InService state. For more information, - // see Health check grace period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) + // see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) // in the Amazon EC2 Auto Scaling User Guide. // // Default: 0 seconds @@ -7480,10 +7498,8 @@ type CreateAutoScalingGroupInput struct { // MinSize is a required field MinSize *int64 `type:"integer" required:"true"` - // An embedded object that specifies a mixed instances policy. - // - // For more information, see Auto Scaling groups with multiple instance types - // and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) + // The mixed instances policy. For more information, see Auto Scaling groups + // with multiple instance types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) // in the Amazon EC2 Auto Scaling User Guide. MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"` @@ -7842,21 +7858,10 @@ type CreateLaunchConfigurationInput struct { // in the Amazon EC2 User Guide for Linux Instances. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` - // EC2-Classic retires on August 15, 2022. This property is not supported after - // that date. - // - // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. - // For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) - // in the Amazon EC2 User Guide for Linux Instances. + // Available for backward compatibility. 
ClassicLinkVPCId *string `min:"1" type:"string"` - // EC2-Classic retires on August 15, 2022. This property is not supported after - // that date. - // - // The IDs of one or more security groups for the specified ClassicLink-enabled - // VPC. - // - // If you specify the ClassicLinkVPCId property, you must specify ClassicLinkVPCSecurityGroups. + // Available for backward compatibility. ClassicLinkVPCSecurityGroups []*string `type:"list"` // Specifies whether the launch configuration is optimized for EBS I/O (true) @@ -10954,11 +10959,13 @@ type DesiredConfiguration struct { // in the Amazon EC2 Auto Scaling User Guide. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` - // Describes a mixed instances policy. A mixed instances policy contains the - // instance types that Amazon EC2 Auto Scaling can launch and other information - // that Amazon EC2 Auto Scaling can use to launch instances and help optimize - // your costs. For more information, see Auto Scaling groups with multiple instance - // types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) + // Use this structure to launch multiple instance types and On-Demand Instances + // and Spot Instances within a single Auto Scaling group. + // + // A mixed instances policy contains information that Amazon EC2 Auto Scaling + // can use to launch instances and help optimize your costs. For more information, + // see Auto Scaling groups with multiple instance types and purchase options + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) // in the Amazon EC2 Auto Scaling User Guide. MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"` } @@ -13431,15 +13438,34 @@ func (s *InstanceRefreshWarmPoolProgress) SetPercentageComplete(v int64) *Instan return s } -// When you specify multiple parameters, you get instance types that satisfy -// all of the specified parameters. If you specify multiple values for a parameter, +// The attributes for the instance types for a mixed instances policy. Amazon +// EC2 Auto Scaling uses your specified requirements to identify instance types. +// Then, it uses your On-Demand and Spot allocation strategies to launch instances +// from these instance types. +// +// When you specify multiple attributes, you get instance types that satisfy +// all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. // -// Represents requirements for the types of instances that can be launched. -// You must specify VCpuCount and MemoryMiB, but all other parameters are optional. +// To limit the list of instance types from which Amazon EC2 Auto Scaling can +// identify matching instance types, you can use one of the following parameters, +// but not both in the same request: +// +// - AllowedInstanceTypes - The instance types to include in the list. All +// other instance types are ignored, even if they match your specified attributes. +// +// - ExcludedInstanceTypes - The instance types to exclude from the list, +// even if they match your specified attributes. +// +// You must specify VCpuCount and MemoryMiB. All other attributes are optional. +// Any unspecified optional attribute is set to its default. 
+// // For more information, see Creating an Auto Scaling group using attribute-based // instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) -// in the Amazon EC2 Auto Scaling User Guide. +// in the Amazon EC2 Auto Scaling User Guide. For help determining which instance +// types match your attributes before you apply them to your Auto Scaling group, +// see Preview instance types with specified attributes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-get-instance-types-from-instance-requirements) +// in the Amazon EC2 User Guide for Linux Instances. type InstanceRequirements struct { _ struct{} `type:"structure"` @@ -13448,7 +13474,7 @@ type InstanceRequirements struct { // // To exclude accelerator-enabled instance types, set Max to 0. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits AcceleratorCount *AcceleratorCountRequest `type:"structure"` // Indicates whether instance types must have accelerators by specific manufacturers. @@ -13486,7 +13512,7 @@ type InstanceRequirements struct { // The minimum and maximum total memory size for the accelerators on an instance // type, in MiB. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits AcceleratorTotalMemoryMiB *AcceleratorTotalMemoryMiBRequest `type:"structure"` // Lists the accelerator types that must be on an instance type. @@ -13500,6 +13526,23 @@ type InstanceRequirements struct { // Default: Any accelerator type AcceleratorTypes []*string `type:"list" enum:"AcceleratorType"` + // The instance types to apply your specified attributes against. All other + // instance types are ignored, even if they match your specified attributes. + // + // You can use strings with one or more wild cards, represented by an asterisk + // (*), to allow an instance type, size, or generation. The following are examples: + // m5.8xlarge, c5*.*, m5a.*, r*, *3*. + // + // For example, if you specify c5*, Amazon EC2 Auto Scaling will allow the entire + // C5 instance family, which includes all C5a and C5n instance types. If you + // specify m5a.*, Amazon EC2 Auto Scaling will allow all the M5a instance types, + // but not the M5n instance types. + // + // If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. + // + // Default: All instance types + AllowedInstanceTypes []*string `type:"list"` + // Indicates whether bare metal instance types are included, excluded, or required. // // Default: excluded @@ -13509,7 +13552,7 @@ type InstanceRequirements struct { // in Mbps. For more information, see Amazon EBS–optimized instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) // in the Amazon EC2 User Guide for Linux Instances. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits BaselineEbsBandwidthMbps *BaselineEbsBandwidthMbpsRequest `type:"structure"` // Indicates whether burstable performance instance types are included, excluded, @@ -13534,13 +13577,17 @@ type InstanceRequirements struct { // Default: Any manufacturer CpuManufacturers []*string `type:"list" enum:"CpuManufacturer"` - // Lists which instance types to exclude. You can use strings with one or more - // wild cards, represented by an asterisk (*). The following are examples: c5*, - // m5a.*, r*, *3*. + // The instance types to exclude. 
You can use strings with one or more wild + // cards, represented by an asterisk (*), to exclude an instance family, type, + // size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, + // r*, *3*. // // For example, if you specify c5*, you are excluding the entire C5 instance // family, which includes all C5a and C5n instance types. If you specify m5a.*, - // you are excluding all the M5a instance types, but not the M5n instance types. + // Amazon EC2 Auto Scaling will exclude all the M5a instance types, but not + // the M5n instance types. + // + // If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes. // // Default: No excluded instance types ExcludedInstanceTypes []*string `type:"list"` @@ -13578,7 +13625,7 @@ type InstanceRequirements struct { // The minimum and maximum amount of memory per vCPU for an instance type, in // GiB. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits MemoryGiBPerVCpu *MemoryGiBPerVCpuRequest `type:"structure"` // The minimum and maximum instance memory size for an instance type, in MiB. @@ -13586,9 +13633,15 @@ type InstanceRequirements struct { // MemoryMiB is a required field MemoryMiB *MemoryMiBRequest `type:"structure" required:"true"` + // The minimum and maximum amount of network bandwidth, in gigabits per second + // (Gbps). + // + // Default: No minimum or maximum limits + NetworkBandwidthGbps *NetworkBandwidthGbpsRequest `type:"structure"` + // The minimum and maximum number of network interfaces for an instance type. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits NetworkInterfaceCount *NetworkInterfaceCountRequest `type:"structure"` // The price protection threshold for On-Demand Instances. This is the maximum @@ -13632,7 +13685,7 @@ type InstanceRequirements struct { // The minimum and maximum total local storage size for an instance type, in // GB. // - // Default: No minimum or maximum + // Default: No minimum or maximum limits TotalLocalStorageGB *TotalLocalStorageGBRequest `type:"structure"` // The minimum and maximum number of vCPUs for an instance type. @@ -13715,6 +13768,12 @@ func (s *InstanceRequirements) SetAcceleratorTypes(v []*string) *InstanceRequire return s } +// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value. +func (s *InstanceRequirements) SetAllowedInstanceTypes(v []*string) *InstanceRequirements { + s.AllowedInstanceTypes = v + return s +} + // SetBareMetal sets the BareMetal field's value. func (s *InstanceRequirements) SetBareMetal(v string) *InstanceRequirements { s.BareMetal = &v @@ -13775,6 +13834,12 @@ func (s *InstanceRequirements) SetMemoryMiB(v *MemoryMiBRequest) *InstanceRequir return s } +// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value. +func (s *InstanceRequirements) SetNetworkBandwidthGbps(v *NetworkBandwidthGbpsRequest) *InstanceRequirements { + s.NetworkBandwidthGbps = v + return s +} + // SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value. func (s *InstanceRequirements) SetNetworkInterfaceCount(v *NetworkInterfaceCountRequest) *InstanceRequirements { s.NetworkInterfaceCount = v @@ -13847,36 +13912,45 @@ func (s *InstanceReusePolicy) SetReuseOnScaleIn(v bool) *InstanceReusePolicy { return s } -// Describes an instances distribution for an Auto Scaling group. 
+// Use this structure to specify the distribution of On-Demand Instances and +// Spot Instances and the allocation strategies used to fulfill On-Demand and +// Spot capacities for a mixed instances policy. type InstancesDistribution struct { _ struct{} `type:"structure"` - // The order of the launch template overrides to use in fulfilling On-Demand - // capacity. + // The allocation strategy to apply to your On-Demand Instances when they are + // launched. Possible instance types are determined by the launch template overrides + // that you specify. + // + // The following lists the valid values: // - // If you specify lowest-price, Amazon EC2 Auto Scaling uses price to determine - // the order, launching the lowest price first. + // lowest-price // - // If you specify prioritized, Amazon EC2 Auto Scaling uses the priority that - // you assigned to each launch template override, launching the highest priority - // first. If all your On-Demand capacity cannot be fulfilled using your highest - // priority instance, then Amazon EC2 Auto Scaling launches the remaining capacity - // using the second priority instance type, and so on. + // Uses price to determine which instance types are the highest priority, launching + // the lowest priced instance types within an Availability Zone first. This + // is the default value for Auto Scaling groups that specify InstanceRequirements. // - // Default: lowest-price for Auto Scaling groups that specify InstanceRequirements - // in the overrides and prioritized for Auto Scaling groups that don't. + // prioritized // - // Valid values: lowest-price | prioritized + // You set the order of instance types for the launch template overrides from + // highest to lowest priority (from first to last in the list). Amazon EC2 Auto + // Scaling launches your highest priority instance types first. If all your + // On-Demand capacity cannot be fulfilled using your highest priority instance + // type, then Amazon EC2 Auto Scaling launches the remaining capacity using + // the second priority instance type, and so on. This is the default value for + // Auto Scaling groups that don't specify InstanceRequirements and cannot be + // used for groups that do. OnDemandAllocationStrategy *string `type:"string"` // The minimum amount of the Auto Scaling group's capacity that must be fulfilled // by On-Demand Instances. This base portion is launched first as your group // scales. // - // If you specify weights for the instance types in the overrides, the base - // capacity is measured in the same unit of measurement as the instance types. - // If you specify InstanceRequirements in the overrides, the base capacity is - // measured in the same unit of measurement as your group's desired capacity. + // This number has the same unit of measurement as the group's desired capacity. + // If you change the default unit of measurement (number of instances) by specifying + // weighted capacity values in your launch template overrides list, or by changing + // the default desired capacity type setting of the group, you must specify + // this number using the same unit of measurement. // // Default: 0 OnDemandBaseCapacity *int64 `type:"integer"` @@ -13889,41 +13963,63 @@ type InstancesDistribution struct { // Default: 100 OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"` - // Indicates how to allocate instances across Spot Instance pools. + // The allocation strategy to apply to your Spot Instances when they are launched. 
+ // Possible instance types are determined by the launch template overrides that + // you specify. // - // If the allocation strategy is lowest-price, the Auto Scaling group launches - // instances using the Spot pools with the lowest price, and evenly allocates - // your instances across the number of Spot pools that you specify. + // The following lists the valid values: // - // If the allocation strategy is capacity-optimized (recommended), the Auto - // Scaling group launches instances using Spot pools that are optimally chosen - // based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized - // and set the order of instance types in the list of launch template overrides - // from highest to lowest priority (from first to last in the list). Amazon - // EC2 Auto Scaling honors the instance type priorities on a best-effort basis - // but optimizes for capacity first. + // capacity-optimized // - // Default: lowest-price + // Requests Spot Instances using pools that are optimally chosen based on the + // available Spot capacity. This strategy has the lowest risk of interruption. + // To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. // - // Valid values: lowest-price | capacity-optimized | capacity-optimized-prioritized + // capacity-optimized-prioritized + // + // You set the order of instance types for the launch template overrides from + // highest to lowest priority (from first to last in the list). Amazon EC2 Auto + // Scaling honors the instance type priorities on a best effort basis but optimizes + // for capacity first. Note that if the On-Demand allocation strategy is set + // to prioritized, the same priority is applied when fulfilling On-Demand capacity. + // This is not a valid value for Auto Scaling groups that specify InstanceRequirements. + // + // lowest-price + // + // Requests Spot Instances using the lowest priced pools within an Availability + // Zone, across the number of Spot pools that you specify for the SpotInstancePools + // property. To ensure that your desired capacity is met, you might receive + // Spot Instances from several pools. This is the default value, but it might + // lead to high interruption rates because this strategy only considers instance + // price and not available capacity. + // + // price-capacity-optimized (recommended) + // + // Amazon EC2 Auto Scaling identifies the pools with the highest capacity availability + // for the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. Amazon EC2 Auto Scaling then requests Spot Instances from + // the lowest priced of these pools. SpotAllocationStrategy *string `type:"string"` // The number of Spot Instance pools across which to allocate your Spot Instances. // The Spot pools are determined from the different instance types in the overrides. - // Valid only when the Spot allocation strategy is lowest-price. Value must - // be in the range of 1–20. + // Valid only when the SpotAllocationStrategy is lowest-price. Value must be + // in the range of 1–20. // // Default: 2 SpotInstancePools *int64 `type:"integer"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. - // If you keep the value at its default (unspecified), Amazon EC2 Auto Scaling - // uses the On-Demand price as the maximum Spot price. 
To remove a value that - // you previously set, include the property but specify an empty string ("") - // for the value. - // // If your maximum price is lower than the Spot price for the instance types - // that you selected, your Spot Instances are not launched. + // that you selected, your Spot Instances are not launched. We do not recommend + // specifying a maximum price because it can lead to increased interruptions. + // When Spot Instances launch, you pay the current Spot price. To remove a maximum + // price that you previously set, include the property but specify an empty + // string ("") for the value. + // + // If you specify a maximum price, your instances will be interrupted more frequently + // than if you do not specify one. // // Valid Range: Minimum value of 0.001 SpotMaxPrice *string `type:"string"` @@ -14004,16 +14100,10 @@ type LaunchConfiguration struct { // in the Amazon EC2 User Guide for Linux Instances. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` - // EC2-Classic retires on August 15, 2022. This property is not supported after - // that date. - // - // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // Available for backward compatibility. ClassicLinkVPCId *string `min:"1" type:"string"` - // EC2-Classic retires on August 15, 2022. This property is not supported after - // that date. - // - // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + // Available for backward compatibility. ClassicLinkVPCSecurityGroups []*string `type:"list"` // The creation date and time for the launch configuration. @@ -14250,21 +14340,16 @@ func (s *LaunchConfiguration) SetUserData(v string) *LaunchConfiguration { return s } -// Describes a launch template and overrides. You specify these properties as -// part of a mixed instances policy. +// Use this structure to specify the launch templates and instance types (overrides) +// for a mixed instances policy. type LaunchTemplate struct { _ struct{} `type:"structure"` - // The launch template to use. + // The launch template. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` // Any properties that you specify override the same properties in the launch - // template. If not provided, Amazon EC2 Auto Scaling uses the instance type - // or instance type requirements specified in the launch template when it launches - // an instance. - // - // The overrides can include either one or more instance types or a set of instance - // requirements, but not both. + // template. Overrides []*LaunchTemplateOverrides `type:"list"` } @@ -14323,45 +14408,82 @@ func (s *LaunchTemplate) SetOverrides(v []*LaunchTemplateOverrides) *LaunchTempl return s } -// Describes an override for a launch template. For more information, see Configuring -// overrides (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-configuring-overrides.html) -// in the Amazon EC2 Auto Scaling User Guide. +// Use this structure to let Amazon EC2 Auto Scaling do the following when the +// Auto Scaling group has a mixed instances policy: +// +// - Override the instance type that is specified in the launch template. +// +// - Use multiple instance types. +// +// Specify the instance types that you want, or define your instance requirements +// instead and let Amazon EC2 Auto Scaling provision the available instance +// types that meet your requirements. 
This can provide Amazon EC2 Auto Scaling +// with a larger selection of instance types to choose from when fulfilling +// Spot and On-Demand capacities. You can view which instance types are matched +// before you apply the instance requirements to your Auto Scaling group. +// +// After you define your instance requirements, you don't have to keep updating +// these settings to get new EC2 instance types automatically. Amazon EC2 Auto +// Scaling uses the instance requirements of the Auto Scaling group to determine +// whether a new EC2 instance type can be used. type LaunchTemplateOverrides struct { _ struct{} `type:"structure"` - // The instance requirements. When you specify instance requirements, Amazon - // EC2 Auto Scaling finds instance types that satisfy your requirements, and - // then uses your On-Demand and Spot allocation strategies to launch instances - // from these instance types, in the same way as when you specify a list of - // specific instance types. + // The instance requirements. Amazon EC2 Auto Scaling uses your specified requirements + // to identify instance types. Then, it uses your On-Demand and Spot allocation + // strategies to launch instances from these instance types. + // + // You can specify up to four separate sets of instance requirements per Auto + // Scaling group. This is useful for provisioning instances from different Amazon + // Machine Images (AMIs) in the same Auto Scaling group. To do this, create + // the AMIs and create a new launch template for each AMI. Then, create a compatible + // set of instance requirements for each launch template. + // + // If you specify InstanceRequirements, you can't specify InstanceType. InstanceRequirements *InstanceRequirements `type:"structure"` - // The instance type, such as m3.xlarge. You must use an instance type that + // The instance type, such as m3.xlarge. You must specify an instance type that // is supported in your requested Region and Availability Zones. For more information, // see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon Elastic Compute Cloud User Guide. + // + // You can specify up to 40 instance types per Auto Scaling group. InstanceType *string `min:"1" type:"string"` - // Provides a launch template for the specified instance type or instance requirements. - // For example, some instance types might require a launch template with a different - // AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's - // defined for your mixed instances policy. For more information, see Specifying - // a different launch template for an instance type (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-launch-template-overrides.html) + // Provides a launch template for the specified instance type or set of instance + // requirements. For example, some instance types might require a launch template + // with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch + // template that's specified in the LaunchTemplate definition. For more information, + // see Specifying a different launch template for an instance type (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-launch-template-overrides.html) // in the Amazon EC2 Auto Scaling User Guide. + // + // You can specify up to 20 launch templates per Auto Scaling group. 
The launch + // templates specified in the overrides and in the LaunchTemplate definition + // count towards this limit. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // The number of capacity units provided by the instance type specified in InstanceType - // in terms of virtual CPUs, memory, storage, throughput, or other relative - // performance characteristic. When a Spot or On-Demand Instance is launched, - // the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling - // launches instances until the desired capacity is totally fulfilled, even - // if this results in an overage. For example, if there are two units remaining - // to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance - // with a WeightedCapacity of five units, the instance is launched, and the - // desired capacity is exceeded by three units. For more information, see Configuring - // instance weighting for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html) + // If you provide a list of instance types to use, you can specify the number + // of capacity units provided by each instance type in terms of virtual CPUs, + // memory, storage, throughput, or other relative performance characteristic. + // When a Spot or On-Demand Instance is launched, the capacity units count toward + // the desired capacity. Amazon EC2 Auto Scaling launches instances until the + // desired capacity is totally fulfilled, even if this results in an overage. + // For example, if there are two units remaining to fulfill capacity, and Amazon + // EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five + // units, the instance is launched, and the desired capacity is exceeded by + // three units. For more information, see Configuring instance weighting for + // Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html) // in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of // 1–999. + // + // If you specify a value for WeightedCapacity for one instance type, you must + // specify a value for WeightedCapacity for all of them. + // + // Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, + // and MinSize). Usually, you set these sizes based on a specific number of + // instances. However, if you configure a mixed instances policy that defines + // weights for the instance types, you must specify these sizes with the same + // units that you use for weighting instances. WeightedCapacity *string `min:"1" type:"string"` } @@ -15519,11 +15641,13 @@ func (s *MetricStat) SetUnit(v string) *MetricStat { return s } -// Describes a mixed instances policy. A mixed instances policy contains the -// instance types that Amazon EC2 Auto Scaling can launch and other information -// that Amazon EC2 Auto Scaling can use to launch instances and help optimize -// your costs. For more information, see Auto Scaling groups with multiple instance -// types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) +// Use this structure to launch multiple instance types and On-Demand Instances +// and Spot Instances within a single Auto Scaling group. +// +// A mixed instances policy contains information that Amazon EC2 Auto Scaling +// can use to launch instances and help optimize your costs. 
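// Illustrative sketch only: one way to assemble a mixed instances policy from the
// structures in this package. The launch template name, version, instance types,
// weights, and distribution values below are assumed placeholders, and the aws
// helper package (aws.String, aws.Int64) is assumed to be imported.
//
//	policy := &autoscaling.MixedInstancesPolicy{
//		LaunchTemplate: &autoscaling.LaunchTemplate{
//			LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
//				LaunchTemplateName: aws.String("my-launch-template"),
//				Version:            aws.String("$Latest"),
//			},
//			Overrides: []*autoscaling.LaunchTemplateOverrides{
//				{InstanceType: aws.String("m5.xlarge"), WeightedCapacity: aws.String("4")},
//				{InstanceType: aws.String("m5.2xlarge"), WeightedCapacity: aws.String("8")},
//			},
//		},
//		InstancesDistribution: &autoscaling.InstancesDistribution{
//			OnDemandBaseCapacity:                aws.Int64(2),
//			OnDemandPercentageAboveBaseCapacity: aws.Int64(25),
//			SpotAllocationStrategy:              aws.String("price-capacity-optimized"),
//		},
//	}
//
// The resulting value would then be set on a CreateAutoScalingGroupInput or
// UpdateAutoScalingGroupInput through its MixedInstancesPolicy field.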
For more information, +// see Auto Scaling groups with multiple instance types and purchase options +// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) // in the Amazon EC2 Auto Scaling User Guide. type MixedInstancesPolicy struct { _ struct{} `type:"structure"` @@ -15581,6 +15705,55 @@ func (s *MixedInstancesPolicy) SetLaunchTemplate(v *LaunchTemplate) *MixedInstan return s } +// Specifies the minimum and maximum for the NetworkBandwidthGbps object when +// you specify InstanceRequirements for an Auto Scaling group. +// +// Setting the minimum bandwidth does not guarantee that your instance will +// achieve the minimum bandwidth. Amazon EC2 will identify instance types that +// support the specified minimum bandwidth, but the actual bandwidth of your +// instance might go below the specified minimum at times. For more information, +// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth) +// in the Amazon EC2 User Guide for Linux Instances. +type NetworkBandwidthGbpsRequest struct { + _ struct{} `type:"structure"` + + // The maximum amount of network bandwidth, in gigabits per second (Gbps). + Max *float64 `type:"double"` + + // The minimum amount of network bandwidth, in gigabits per second (Gbps). + Min *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbpsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbpsRequest) GoString() string { + return s.String() +} + +// SetMax sets the Max field's value. +func (s *NetworkBandwidthGbpsRequest) SetMax(v float64) *NetworkBandwidthGbpsRequest { + s.Max = &v + return s +} + +// SetMin sets the Min field's value. +func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsRequest { + s.Min = &v + return s +} + // Specifies the minimum and maximum for the NetworkInterfaceCount object when // you specify InstanceRequirements for an Auto Scaling group. type NetworkInterfaceCountRequest struct { @@ -19352,7 +19525,7 @@ type UpdateAutoScalingGroupInput struct { // and marking it unhealthy due to a failed Elastic Load Balancing or custom // health check. This is useful if your instances do not immediately pass these // health checks after they enter the InService state. For more information, - // see Health check grace period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) + // see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) // in the Amazon EC2 Auto Scaling User Guide. HealthCheckGracePeriod *int64 `type:"integer"` @@ -19391,9 +19564,8 @@ type UpdateAutoScalingGroupInput struct { // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` - // An embedded object that specifies a mixed instances policy. 
For more information, - // see Auto Scaling groups with multiple instance types and purchase options - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) + // The mixed instances policy. For more information, see Auto Scaling groups + // with multiple instance types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) // in the Amazon EC2 Auto Scaling User Guide. MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"` diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index c13d0c3d3ad3..23be76a83ff7 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -13,6 +13,81 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/ec2query" ) +const opAcceptAddressTransfer = "AcceptAddressTransfer" + +// AcceptAddressTransferRequest generates a "aws/request.Request" representing the +// client's request for the AcceptAddressTransfer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptAddressTransfer for more information on using the AcceptAddressTransfer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the AcceptAddressTransferRequest method. +// req, resp := client.AcceptAddressTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptAddressTransfer +func (c *EC2) AcceptAddressTransferRequest(input *AcceptAddressTransferInput) (req *request.Request, output *AcceptAddressTransferOutput) { + op := &request.Operation{ + Name: opAcceptAddressTransfer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptAddressTransferInput{} + } + + output = &AcceptAddressTransferOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptAddressTransfer API operation for Amazon Elastic Compute Cloud. +// +// Accepts an Elastic IP address transfer. For more information, see Accept +// a transferred Elastic IP address (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#using-instance-addressing-eips-transfer-accept) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptAddressTransfer for usage and error information. 
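// Illustrative sketch only: the Elastic IP address below is a placeholder, and a
// configured session with credentials for the accepting account is assumed.
//
//	svc := ec2.New(session.Must(session.NewSession()))
//	out, err := svc.AcceptAddressTransfer(&ec2.AcceptAddressTransferInput{
//		Address: aws.String("198.51.100.10"),
//	})
//	if err == nil {
//		fmt.Println(out)
//	}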
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptAddressTransfer +func (c *EC2) AcceptAddressTransfer(input *AcceptAddressTransferInput) (*AcceptAddressTransferOutput, error) { + req, out := c.AcceptAddressTransferRequest(input) + return out, req.Send() +} + +// AcceptAddressTransferWithContext is the same as AcceptAddressTransfer with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptAddressTransfer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptAddressTransferWithContext(ctx aws.Context, input *AcceptAddressTransferInput, opts ...request.Option) (*AcceptAddressTransferOutput, error) { + req, out := c.AcceptAddressTransferRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQuote" // AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the @@ -3281,6 +3356,82 @@ func (c *EC2) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTa return out, req.Send() } +const opCancelImageLaunchPermission = "CancelImageLaunchPermission" + +// CancelImageLaunchPermissionRequest generates a "aws/request.Request" representing the +// client's request for the CancelImageLaunchPermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelImageLaunchPermission for more information on using the CancelImageLaunchPermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CancelImageLaunchPermissionRequest method. +// req, resp := client.CancelImageLaunchPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImageLaunchPermission +func (c *EC2) CancelImageLaunchPermissionRequest(input *CancelImageLaunchPermissionInput) (req *request.Request, output *CancelImageLaunchPermissionOutput) { + op := &request.Operation{ + Name: opCancelImageLaunchPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImageLaunchPermissionInput{} + } + + output = &CancelImageLaunchPermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelImageLaunchPermission API operation for Amazon Elastic Compute Cloud. +// +// Removes your Amazon Web Services account from the launch permissions for +// the specified AMI. For more information, see Cancel having an AMI shared +// with your Amazon Web Services account (https://docs.aws.amazon.com/) in the +// Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelImageLaunchPermission for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImageLaunchPermission +func (c *EC2) CancelImageLaunchPermission(input *CancelImageLaunchPermissionInput) (*CancelImageLaunchPermissionOutput, error) { + req, out := c.CancelImageLaunchPermissionRequest(input) + return out, req.Send() +} + +// CancelImageLaunchPermissionWithContext is the same as CancelImageLaunchPermission with the addition of +// the ability to pass a context and additional request options. +// +// See CancelImageLaunchPermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelImageLaunchPermissionWithContext(ctx aws.Context, input *CancelImageLaunchPermissionInput, opts ...request.Option) (*CancelImageLaunchPermissionOutput, error) { + req, out := c.CancelImageLaunchPermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCancelImportTask = "CancelImportTask" // CancelImportTaskRequest generates a "aws/request.Request" representing the @@ -3800,7 +3951,7 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out // in the Amazon Elastic Compute Cloud User Guide. // // For more information about the prerequisites and limits when copying an AMI, -// see Copying an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// see Copy an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5224,7 +5375,7 @@ func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, // from this new AMI, the instance automatically launches with those additional // volumes. // -// For more information, see Creating Amazon EBS-Backed Linux AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// For more information, see Create an Amazon EBS-backed Linux AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7148,11 +7299,12 @@ func (c *EC2) CreateReplaceRootVolumeTaskRequest(input *CreateReplaceRootVolumeT // CreateReplaceRootVolumeTask API operation for Amazon Elastic Compute Cloud. // -// Creates a root volume replacement task for an Amazon EC2 instance. The root -// volume can either be restored to its initial launch state, or it can be restored -// using a specific snapshot. +// Replaces the EBS-backed root volume for a running instance with a new volume +// that is restored to the original root volume's launch state, that is restored +// to a specific snapshot taken from the original root volume, or that is restored +// from an AMI that has the same key characteristics as that of the instance. 
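// Illustrative sketch only: restoring the root volume to a specific snapshot. The
// instance and snapshot IDs are placeholders, and an initialized *EC2 client named
// svc is assumed.
//
//	_, err := svc.CreateReplaceRootVolumeTask(&ec2.CreateReplaceRootVolumeTaskInput{
//		InstanceId: aws.String("i-0123456789abcdef0"),
//		SnapshotId: aws.String("snap-0123456789abcdef0"),
//	})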
// -// For more information, see Replace a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-restoring-volume.html#replace-root) +// For more information, see Replace a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/replace-root.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -15966,6 +16118,138 @@ func (c *EC2) DescribeAccountAttributesWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeAddressTransfers = "DescribeAddressTransfers" + +// DescribeAddressTransfersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAddressTransfers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAddressTransfers for more information on using the DescribeAddressTransfers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeAddressTransfersRequest method. +// req, resp := client.DescribeAddressTransfersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressTransfers +func (c *EC2) DescribeAddressTransfersRequest(input *DescribeAddressTransfersInput) (req *request.Request, output *DescribeAddressTransfersOutput) { + op := &request.Operation{ + Name: opDescribeAddressTransfers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAddressTransfersInput{} + } + + output = &DescribeAddressTransfersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAddressTransfers API operation for Amazon Elastic Compute Cloud. +// +// Describes an Elastic IP address transfer. For more information, see Transfer +// Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAddressTransfers for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressTransfers +func (c *EC2) DescribeAddressTransfers(input *DescribeAddressTransfersInput) (*DescribeAddressTransfersOutput, error) { + req, out := c.DescribeAddressTransfersRequest(input) + return out, req.Send() +} + +// DescribeAddressTransfersWithContext is the same as DescribeAddressTransfers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAddressTransfers for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAddressTransfersWithContext(ctx aws.Context, input *DescribeAddressTransfersInput, opts ...request.Option) (*DescribeAddressTransfersOutput, error) { + req, out := c.DescribeAddressTransfersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAddressTransfersPages iterates over the pages of a DescribeAddressTransfers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAddressTransfers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAddressTransfers operation. +// pageNum := 0 +// err := client.DescribeAddressTransfersPages(params, +// func(page *ec2.DescribeAddressTransfersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) DescribeAddressTransfersPages(input *DescribeAddressTransfersInput, fn func(*DescribeAddressTransfersOutput, bool) bool) error { + return c.DescribeAddressTransfersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAddressTransfersPagesWithContext same as DescribeAddressTransfersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAddressTransfersPagesWithContext(ctx aws.Context, input *DescribeAddressTransfersInput, fn func(*DescribeAddressTransfersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAddressTransfersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAddressTransfersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAddressTransfersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeAddresses = "DescribeAddresses" // DescribeAddressesRequest generates a "aws/request.Request" representing the @@ -16344,6 +16628,136 @@ func (c *EC2) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeAwsNetworkPerformanceMetricSubscriptions = "DescribeAwsNetworkPerformanceMetricSubscriptions" + +// DescribeAwsNetworkPerformanceMetricSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAwsNetworkPerformanceMetricSubscriptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAwsNetworkPerformanceMetricSubscriptions for more information on using the DescribeAwsNetworkPerformanceMetricSubscriptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeAwsNetworkPerformanceMetricSubscriptionsRequest method. +// req, resp := client.DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAwsNetworkPerformanceMetricSubscriptions +func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(input *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (req *request.Request, output *DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeAwsNetworkPerformanceMetricSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAwsNetworkPerformanceMetricSubscriptionsInput{} + } + + output = &DescribeAwsNetworkPerformanceMetricSubscriptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAwsNetworkPerformanceMetricSubscriptions API operation for Amazon Elastic Compute Cloud. +// +// Describes the curent Infrastructure Performance metric subscriptions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAwsNetworkPerformanceMetricSubscriptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAwsNetworkPerformanceMetricSubscriptions +func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptions(input *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (*DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error) { + req, out := c.DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(input) + return out, req.Send() +} + +// DescribeAwsNetworkPerformanceMetricSubscriptionsWithContext is the same as DescribeAwsNetworkPerformanceMetricSubscriptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAwsNetworkPerformanceMetricSubscriptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptionsWithContext(ctx aws.Context, input *DescribeAwsNetworkPerformanceMetricSubscriptionsInput, opts ...request.Option) (*DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error) { + req, out := c.DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeAwsNetworkPerformanceMetricSubscriptionsPages iterates over the pages of a DescribeAwsNetworkPerformanceMetricSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAwsNetworkPerformanceMetricSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAwsNetworkPerformanceMetricSubscriptions operation. +// pageNum := 0 +// err := client.DescribeAwsNetworkPerformanceMetricSubscriptionsPages(params, +// func(page *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptionsPages(input *DescribeAwsNetworkPerformanceMetricSubscriptionsInput, fn func(*DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool) error { + return c.DescribeAwsNetworkPerformanceMetricSubscriptionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAwsNetworkPerformanceMetricSubscriptionsPagesWithContext same as DescribeAwsNetworkPerformanceMetricSubscriptionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptionsPagesWithContext(ctx aws.Context, input *DescribeAwsNetworkPerformanceMetricSubscriptionsInput, fn func(*DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAwsNetworkPerformanceMetricSubscriptionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAwsNetworkPerformanceMetricSubscriptionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeBundleTasks = "DescribeBundleTasks" // DescribeBundleTasksRequest generates a "aws/request.Request" representing the @@ -25358,7 +25772,7 @@ func (c *EC2) DescribeReplaceRootVolumeTasksRequest(input *DescribeReplaceRootVo // DescribeReplaceRootVolumeTasks API operation for Amazon Elastic Compute Cloud. // // Describes a root volume replacement task. For more information, see Replace -// a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-restoring-volume.html#replace-root) +// a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/replace-root.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -27224,7 +27638,7 @@ func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetReq // recorded event. Spot Fleet events are available for 48 hours. 
// // For more information, see Monitor fleet events using Amazon EventBridge (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/fleet-monitor.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32400,6 +32814,154 @@ func (c *EC2) DetachVpnGatewayWithContext(ctx aws.Context, input *DetachVpnGatew return out, req.Send() } +const opDisableAddressTransfer = "DisableAddressTransfer" + +// DisableAddressTransferRequest generates a "aws/request.Request" representing the +// client's request for the DisableAddressTransfer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableAddressTransfer for more information on using the DisableAddressTransfer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableAddressTransferRequest method. +// req, resp := client.DisableAddressTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableAddressTransfer +func (c *EC2) DisableAddressTransferRequest(input *DisableAddressTransferInput) (req *request.Request, output *DisableAddressTransferOutput) { + op := &request.Operation{ + Name: opDisableAddressTransfer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAddressTransferInput{} + } + + output = &DisableAddressTransferOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableAddressTransfer API operation for Amazon Elastic Compute Cloud. +// +// Disables Elastic IP address transfer. For more information, see Transfer +// Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableAddressTransfer for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableAddressTransfer +func (c *EC2) DisableAddressTransfer(input *DisableAddressTransferInput) (*DisableAddressTransferOutput, error) { + req, out := c.DisableAddressTransferRequest(input) + return out, req.Send() +} + +// DisableAddressTransferWithContext is the same as DisableAddressTransfer with the addition of +// the ability to pass a context and additional request options. +// +// See DisableAddressTransfer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
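// Illustrative sketch only: bounding the call with a timeout through the context
// variant. The allocation ID is a placeholder, and an initialized *EC2 client named
// svc is assumed.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	_, err := svc.DisableAddressTransferWithContext(ctx, &ec2.DisableAddressTransferInput{
//		AllocationId: aws.String("eipalloc-0123456789abcdef0"),
//	})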
+func (c *EC2) DisableAddressTransferWithContext(ctx aws.Context, input *DisableAddressTransferInput, opts ...request.Option) (*DisableAddressTransferOutput, error) { + req, out := c.DisableAddressTransferRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableAwsNetworkPerformanceMetricSubscription = "DisableAwsNetworkPerformanceMetricSubscription" + +// DisableAwsNetworkPerformanceMetricSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DisableAwsNetworkPerformanceMetricSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableAwsNetworkPerformanceMetricSubscription for more information on using the DisableAwsNetworkPerformanceMetricSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableAwsNetworkPerformanceMetricSubscriptionRequest method. +// req, resp := client.DisableAwsNetworkPerformanceMetricSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableAwsNetworkPerformanceMetricSubscription +func (c *EC2) DisableAwsNetworkPerformanceMetricSubscriptionRequest(input *DisableAwsNetworkPerformanceMetricSubscriptionInput) (req *request.Request, output *DisableAwsNetworkPerformanceMetricSubscriptionOutput) { + op := &request.Operation{ + Name: opDisableAwsNetworkPerformanceMetricSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAwsNetworkPerformanceMetricSubscriptionInput{} + } + + output = &DisableAwsNetworkPerformanceMetricSubscriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableAwsNetworkPerformanceMetricSubscription API operation for Amazon Elastic Compute Cloud. +// +// Disables Infrastructure Performance metric subscriptions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableAwsNetworkPerformanceMetricSubscription for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableAwsNetworkPerformanceMetricSubscription +func (c *EC2) DisableAwsNetworkPerformanceMetricSubscription(input *DisableAwsNetworkPerformanceMetricSubscriptionInput) (*DisableAwsNetworkPerformanceMetricSubscriptionOutput, error) { + req, out := c.DisableAwsNetworkPerformanceMetricSubscriptionRequest(input) + return out, req.Send() +} + +// DisableAwsNetworkPerformanceMetricSubscriptionWithContext is the same as DisableAwsNetworkPerformanceMetricSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DisableAwsNetworkPerformanceMetricSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableAwsNetworkPerformanceMetricSubscriptionWithContext(ctx aws.Context, input *DisableAwsNetworkPerformanceMetricSubscriptionInput, opts ...request.Option) (*DisableAwsNetworkPerformanceMetricSubscriptionOutput, error) { + req, out := c.DisableAwsNetworkPerformanceMetricSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisableEbsEncryptionByDefault = "DisableEbsEncryptionByDefault" // DisableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the @@ -34099,6 +34661,154 @@ func (c *EC2) DisassociateVpcCidrBlockWithContext(ctx aws.Context, input *Disass return out, req.Send() } +const opEnableAddressTransfer = "EnableAddressTransfer" + +// EnableAddressTransferRequest generates a "aws/request.Request" representing the +// client's request for the EnableAddressTransfer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableAddressTransfer for more information on using the EnableAddressTransfer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableAddressTransferRequest method. +// req, resp := client.EnableAddressTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableAddressTransfer +func (c *EC2) EnableAddressTransferRequest(input *EnableAddressTransferInput) (req *request.Request, output *EnableAddressTransferOutput) { + op := &request.Operation{ + Name: opEnableAddressTransfer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAddressTransferInput{} + } + + output = &EnableAddressTransferOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableAddressTransfer API operation for Amazon Elastic Compute Cloud. +// +// Enables Elastic IP address transfer. For more information, see Transfer Elastic +// IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableAddressTransfer for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableAddressTransfer +func (c *EC2) EnableAddressTransfer(input *EnableAddressTransferInput) (*EnableAddressTransferOutput, error) { + req, out := c.EnableAddressTransferRequest(input) + return out, req.Send() +} + +// EnableAddressTransferWithContext is the same as EnableAddressTransfer with the addition of +// the ability to pass a context and additional request options. 
+// +// See EnableAddressTransfer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableAddressTransferWithContext(ctx aws.Context, input *EnableAddressTransferInput, opts ...request.Option) (*EnableAddressTransferOutput, error) { + req, out := c.EnableAddressTransferRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableAwsNetworkPerformanceMetricSubscription = "EnableAwsNetworkPerformanceMetricSubscription" + +// EnableAwsNetworkPerformanceMetricSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the EnableAwsNetworkPerformanceMetricSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableAwsNetworkPerformanceMetricSubscription for more information on using the EnableAwsNetworkPerformanceMetricSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableAwsNetworkPerformanceMetricSubscriptionRequest method. +// req, resp := client.EnableAwsNetworkPerformanceMetricSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableAwsNetworkPerformanceMetricSubscription +func (c *EC2) EnableAwsNetworkPerformanceMetricSubscriptionRequest(input *EnableAwsNetworkPerformanceMetricSubscriptionInput) (req *request.Request, output *EnableAwsNetworkPerformanceMetricSubscriptionOutput) { + op := &request.Operation{ + Name: opEnableAwsNetworkPerformanceMetricSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAwsNetworkPerformanceMetricSubscriptionInput{} + } + + output = &EnableAwsNetworkPerformanceMetricSubscriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableAwsNetworkPerformanceMetricSubscription API operation for Amazon Elastic Compute Cloud. +// +// Enables Infrastructure Performance subscriptions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableAwsNetworkPerformanceMetricSubscription for usage and error information. 
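// Illustrative sketch only: the Region pair, metric, and statistic are assumed
// placeholder values, and an initialized *EC2 client named svc is assumed.
//
//	_, err := svc.EnableAwsNetworkPerformanceMetricSubscription(&ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput{
//		Source:      aws.String("us-east-1"),
//		Destination: aws.String("us-west-2"),
//		Metric:      aws.String("aggregate-latency"),
//		Statistic:   aws.String("p50"),
//	})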
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableAwsNetworkPerformanceMetricSubscription +func (c *EC2) EnableAwsNetworkPerformanceMetricSubscription(input *EnableAwsNetworkPerformanceMetricSubscriptionInput) (*EnableAwsNetworkPerformanceMetricSubscriptionOutput, error) { + req, out := c.EnableAwsNetworkPerformanceMetricSubscriptionRequest(input) + return out, req.Send() +} + +// EnableAwsNetworkPerformanceMetricSubscriptionWithContext is the same as EnableAwsNetworkPerformanceMetricSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See EnableAwsNetworkPerformanceMetricSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableAwsNetworkPerformanceMetricSubscriptionWithContext(ctx aws.Context, input *EnableAwsNetworkPerformanceMetricSubscriptionInput, opts ...request.Option) (*EnableAwsNetworkPerformanceMetricSubscriptionOutput, error) { + req, out := c.EnableAwsNetworkPerformanceMetricSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opEnableEbsEncryptionByDefault = "EnableEbsEncryptionByDefault" // EnableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the @@ -34501,6 +35211,77 @@ func (c *EC2) EnableIpamOrganizationAdminAccountWithContext(ctx aws.Context, inp return out, req.Send() } +const opEnableReachabilityAnalyzerOrganizationSharing = "EnableReachabilityAnalyzerOrganizationSharing" + +// EnableReachabilityAnalyzerOrganizationSharingRequest generates a "aws/request.Request" representing the +// client's request for the EnableReachabilityAnalyzerOrganizationSharing operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableReachabilityAnalyzerOrganizationSharing for more information on using the EnableReachabilityAnalyzerOrganizationSharing +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableReachabilityAnalyzerOrganizationSharingRequest method. 
+// req, resp := client.EnableReachabilityAnalyzerOrganizationSharingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableReachabilityAnalyzerOrganizationSharing +func (c *EC2) EnableReachabilityAnalyzerOrganizationSharingRequest(input *EnableReachabilityAnalyzerOrganizationSharingInput) (req *request.Request, output *EnableReachabilityAnalyzerOrganizationSharingOutput) { + op := &request.Operation{ + Name: opEnableReachabilityAnalyzerOrganizationSharing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableReachabilityAnalyzerOrganizationSharingInput{} + } + + output = &EnableReachabilityAnalyzerOrganizationSharingOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableReachabilityAnalyzerOrganizationSharing API operation for Amazon Elastic Compute Cloud. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableReachabilityAnalyzerOrganizationSharing for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableReachabilityAnalyzerOrganizationSharing +func (c *EC2) EnableReachabilityAnalyzerOrganizationSharing(input *EnableReachabilityAnalyzerOrganizationSharingInput) (*EnableReachabilityAnalyzerOrganizationSharingOutput, error) { + req, out := c.EnableReachabilityAnalyzerOrganizationSharingRequest(input) + return out, req.Send() +} + +// EnableReachabilityAnalyzerOrganizationSharingWithContext is the same as EnableReachabilityAnalyzerOrganizationSharing with the addition of +// the ability to pass a context and additional request options. +// +// See EnableReachabilityAnalyzerOrganizationSharing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableReachabilityAnalyzerOrganizationSharingWithContext(ctx aws.Context, input *EnableReachabilityAnalyzerOrganizationSharingInput, opts ...request.Option) (*EnableReachabilityAnalyzerOrganizationSharingOutput, error) { + req, out := c.EnableReachabilityAnalyzerOrganizationSharingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opEnableSerialConsoleAccess = "EnableSerialConsoleAccess" // EnableSerialConsoleAccessRequest generates a "aws/request.Request" representing the @@ -35482,6 +36263,136 @@ func (c *EC2) GetAssociatedIpv6PoolCidrsPagesWithContext(ctx aws.Context, input return p.Err() } +const opGetAwsNetworkPerformanceData = "GetAwsNetworkPerformanceData" + +// GetAwsNetworkPerformanceDataRequest generates a "aws/request.Request" representing the +// client's request for the GetAwsNetworkPerformanceData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetAwsNetworkPerformanceData for more information on using the GetAwsNetworkPerformanceData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetAwsNetworkPerformanceDataRequest method. +// req, resp := client.GetAwsNetworkPerformanceDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAwsNetworkPerformanceData +func (c *EC2) GetAwsNetworkPerformanceDataRequest(input *GetAwsNetworkPerformanceDataInput) (req *request.Request, output *GetAwsNetworkPerformanceDataOutput) { + op := &request.Operation{ + Name: opGetAwsNetworkPerformanceData, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetAwsNetworkPerformanceDataInput{} + } + + output = &GetAwsNetworkPerformanceDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAwsNetworkPerformanceData API operation for Amazon Elastic Compute Cloud. +// +// Gets network performance data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAwsNetworkPerformanceData for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAwsNetworkPerformanceData +func (c *EC2) GetAwsNetworkPerformanceData(input *GetAwsNetworkPerformanceDataInput) (*GetAwsNetworkPerformanceDataOutput, error) { + req, out := c.GetAwsNetworkPerformanceDataRequest(input) + return out, req.Send() +} + +// GetAwsNetworkPerformanceDataWithContext is the same as GetAwsNetworkPerformanceData with the addition of +// the ability to pass a context and additional request options. +// +// See GetAwsNetworkPerformanceData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAwsNetworkPerformanceDataWithContext(ctx aws.Context, input *GetAwsNetworkPerformanceDataInput, opts ...request.Option) (*GetAwsNetworkPerformanceDataOutput, error) { + req, out := c.GetAwsNetworkPerformanceDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetAwsNetworkPerformanceDataPages iterates over the pages of a GetAwsNetworkPerformanceData operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAwsNetworkPerformanceData method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAwsNetworkPerformanceData operation. 
+// pageNum := 0 +// err := client.GetAwsNetworkPerformanceDataPages(params, +// func(page *ec2.GetAwsNetworkPerformanceDataOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) GetAwsNetworkPerformanceDataPages(input *GetAwsNetworkPerformanceDataInput, fn func(*GetAwsNetworkPerformanceDataOutput, bool) bool) error { + return c.GetAwsNetworkPerformanceDataPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetAwsNetworkPerformanceDataPagesWithContext same as GetAwsNetworkPerformanceDataPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAwsNetworkPerformanceDataPagesWithContext(ctx aws.Context, input *GetAwsNetworkPerformanceDataInput, fn func(*GetAwsNetworkPerformanceDataOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetAwsNetworkPerformanceDataInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetAwsNetworkPerformanceDataRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetAwsNetworkPerformanceDataOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetCapacityReservationUsage = "GetCapacityReservationUsage" // GetCapacityReservationUsageRequest generates a "aws/request.Request" representing the @@ -45233,7 +46144,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // Registers an AMI. When you're creating an AMI, this is the final step you // must complete before you can launch an instance from the AMI. For more information -// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// about creating AMIs, see Create your own AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. // // For Amazon EBS-backed instances, CreateImage creates and registers the AMI @@ -45282,7 +46193,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // a Reserved Instance without the matching billing product code, the Reserved // Instance will not be applied to the On-Demand Instance. For information about // how to obtain the platform details and billing information of an AMI, see -// Understanding AMI billing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) +// Understand AMI billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -46799,12 +47710,12 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // only the spot-fleet-request and instance resource types are supported. // // For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. 
// // We strongly discourage using the RequestSpotFleet API because it is a legacy // API with no planned investment. For options for requesting Spot Instances, // see Which is the best Spot request method to use? (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-best-practices.html#which-spot-request-method-to-use) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -50009,6 +50920,108 @@ func (s *AcceleratorTotalMemoryMiBRequest) SetMin(v int64) *AcceleratorTotalMemo return s } +type AcceptAddressTransferInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address you are accepting for transfer. + // + // Address is a required field + Address *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the + // filter value. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AcceptAddressTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AcceptAddressTransferInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptAddressTransferInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptAddressTransferInput"} + if s.Address == nil { + invalidParams.Add(request.NewErrParamRequired("Address")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddress sets the Address field's value. +func (s *AcceptAddressTransferInput) SetAddress(v string) *AcceptAddressTransferInput { + s.Address = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptAddressTransferInput) SetDryRun(v bool) *AcceptAddressTransferInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *AcceptAddressTransferInput) SetTagSpecifications(v []*TagSpecification) *AcceptAddressTransferInput { + s.TagSpecifications = v + return s +} + +type AcceptAddressTransferOutput struct { + _ struct{} `type:"structure"` + + // An Elastic IP address transfer. + AddressTransfer *AddressTransfer `locationName:"addressTransfer" type:"structure"` +} + +// String returns the string representation. 
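+//
+// A minimal sketch of calling AcceptAddressTransfer with the input type defined
+// above, assuming client is an existing *ec2.EC2 service client, the aws and ec2
+// packages are imported, and the address below is a placeholder:
+//
+//	out, err := client.AcceptAddressTransfer(&ec2.AcceptAddressTransferInput{
+//	    Address: aws.String("198.51.100.7"),
+//	})
+//	if err == nil {
+//	    // out.AddressTransfer describes the accepted transfer
+//	    fmt.Println(aws.StringValue(out.AddressTransfer.AddressTransferStatus))
+//	}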
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AcceptAddressTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AcceptAddressTransferOutput) GoString() string { + return s.String() +} + +// SetAddressTransfer sets the AddressTransfer field's value. +func (s *AcceptAddressTransferOutput) SetAddressTransfer(v *AddressTransfer) *AcceptAddressTransferOutput { + s.AddressTransfer = v + return s +} + // Contains the parameters for accepting the quote. type AcceptReservedInstancesExchangeQuoteInput struct { _ struct{} `type:"structure"` @@ -50185,7 +51198,7 @@ func (s *AcceptTransitGatewayMulticastDomainAssociationsInput) SetTransitGateway type AcceptTransitGatewayMulticastDomainAssociationsOutput struct { _ struct{} `type:"structure"` - // Describes the multicast domain associations. + // Information about the multicast domain associations. Associations *TransitGatewayMulticastDomainAssociations `locationName:"associations" type:"structure"` } @@ -51271,6 +52284,88 @@ func (s *AddressAttribute) SetPublicIp(v string) *AddressAttribute { return s } +// Details on the Elastic IP address transfer. For more information, see Transfer +// Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) +// in the Amazon Virtual Private Cloud User Guide. +type AddressTransfer struct { + _ struct{} `type:"structure"` + + // The Elastic IP address transfer status. + AddressTransferStatus *string `locationName:"addressTransferStatus" type:"string" enum:"AddressTransferStatus"` + + // The allocation ID of an Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The Elastic IP address being transferred. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The ID of the account that you want to transfer the Elastic IP address to. + TransferAccountId *string `locationName:"transferAccountId" type:"string"` + + // The timestamp when the Elastic IP address transfer was accepted. + TransferOfferAcceptedTimestamp *time.Time `locationName:"transferOfferAcceptedTimestamp" type:"timestamp"` + + // The timestamp when the Elastic IP address transfer expired. When the source + // account starts the transfer, the transfer account has seven hours to allocate + // the Elastic IP address to complete the transfer, or the Elastic IP address + // will return to its original owner. + TransferOfferExpirationTimestamp *time.Time `locationName:"transferOfferExpirationTimestamp" type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AddressTransfer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AddressTransfer) GoString() string { + return s.String() +} + +// SetAddressTransferStatus sets the AddressTransferStatus field's value. +func (s *AddressTransfer) SetAddressTransferStatus(v string) *AddressTransfer { + s.AddressTransferStatus = &v + return s +} + +// SetAllocationId sets the AllocationId field's value. +func (s *AddressTransfer) SetAllocationId(v string) *AddressTransfer { + s.AllocationId = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *AddressTransfer) SetPublicIp(v string) *AddressTransfer { + s.PublicIp = &v + return s +} + +// SetTransferAccountId sets the TransferAccountId field's value. +func (s *AddressTransfer) SetTransferAccountId(v string) *AddressTransfer { + s.TransferAccountId = &v + return s +} + +// SetTransferOfferAcceptedTimestamp sets the TransferOfferAcceptedTimestamp field's value. +func (s *AddressTransfer) SetTransferOfferAcceptedTimestamp(v time.Time) *AddressTransfer { + s.TransferOfferAcceptedTimestamp = &v + return s +} + +// SetTransferOfferExpirationTimestamp sets the TransferOfferExpirationTimestamp field's value. +func (s *AddressTransfer) SetTransferOfferExpirationTimestamp(v time.Time) *AddressTransfer { + s.TransferOfferExpirationTimestamp = &v + return s +} + type AdvertiseByoipCidrInput struct { _ struct{} `type:"structure"` @@ -54943,6 +56038,10 @@ type AttachNetworkInterfaceInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // Configures ENA Express for the network interface that this action attaches + // to the instance. + EnaSrdSpecification *EnaSrdSpecification `type:"structure"` + // The ID of the instance. // // InstanceId is a required field @@ -55008,6 +56107,12 @@ func (s *AttachNetworkInterfaceInput) SetDryRun(v bool) *AttachNetworkInterfaceI return s } +// SetEnaSrdSpecification sets the EnaSrdSpecification field's value. +func (s *AttachNetworkInterfaceInput) SetEnaSrdSpecification(v *EnaSrdSpecification) *AttachNetworkInterfaceInput { + s.EnaSrdSpecification = v + return s +} + // SetInstanceId sets the InstanceId field's value. func (s *AttachNetworkInterfaceInput) SetInstanceId(v string) *AttachNetworkInterfaceInput { s.InstanceId = &v @@ -55259,6 +56364,83 @@ func (s *AttachVpnGatewayOutput) SetVpcAttachment(v *VpcAttachment) *AttachVpnGa return s } +// Describes the ENA Express configuration for the network interface that's +// attached to the instance. +type AttachmentEnaSrdSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether ENA Express is enabled for the network interface that's + // attached to the instance. + EnaSrdEnabled *bool `locationName:"enaSrdEnabled" type:"boolean"` + + // ENA Express configuration for UDP network traffic. + EnaSrdUdpSpecification *AttachmentEnaSrdUdpSpecification `locationName:"enaSrdUdpSpecification" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttachmentEnaSrdSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AttachmentEnaSrdSpecification) GoString() string { + return s.String() +} + +// SetEnaSrdEnabled sets the EnaSrdEnabled field's value. +func (s *AttachmentEnaSrdSpecification) SetEnaSrdEnabled(v bool) *AttachmentEnaSrdSpecification { + s.EnaSrdEnabled = &v + return s +} + +// SetEnaSrdUdpSpecification sets the EnaSrdUdpSpecification field's value. +func (s *AttachmentEnaSrdSpecification) SetEnaSrdUdpSpecification(v *AttachmentEnaSrdUdpSpecification) *AttachmentEnaSrdSpecification { + s.EnaSrdUdpSpecification = v + return s +} + +// Describes the ENA Express configuration for UDP traffic on the network interface +// that's attached to the instance. +type AttachmentEnaSrdUdpSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether UDP traffic to and from the instance uses ENA Express. + // To specify this setting, you must first enable ENA Express. + EnaSrdUdpEnabled *bool `locationName:"enaSrdUdpEnabled" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttachmentEnaSrdUdpSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttachmentEnaSrdUdpSpecification) GoString() string { + return s.String() +} + +// SetEnaSrdUdpEnabled sets the EnaSrdUdpEnabled field's value. +func (s *AttachmentEnaSrdUdpSpecification) SetEnaSrdUdpEnabled(v bool) *AttachmentEnaSrdUdpSpecification { + s.EnaSrdUdpEnabled = &v + return s +} + // Describes a value for a resource attribute that is a Boolean value. type AttributeBooleanValue struct { _ struct{} `type:"structure"` @@ -57120,6 +58302,95 @@ func (s CancelExportTaskOutput) GoString() string { return s.String() } +type CancelImageLaunchPermissionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AMI that was shared with your Amazon Web Services account. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelImageLaunchPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelImageLaunchPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
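+//
+// A minimal sketch of calling CancelImageLaunchPermission with the input defined
+// above, assuming client is an existing *ec2.EC2 service client and the AMI ID is
+// a placeholder for an image that was shared with this account:
+//
+//	out, err := client.CancelImageLaunchPermission(&ec2.CancelImageLaunchPermissionInput{
+//	    ImageId: aws.String("ami-0123456789abcdef0"),
+//	})
+//	if err == nil && aws.BoolValue(out.Return) {
+//	    // the launch permission for this account has been removed
+//	}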
+func (s *CancelImageLaunchPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelImageLaunchPermissionInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelImageLaunchPermissionInput) SetDryRun(v bool) *CancelImageLaunchPermissionInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *CancelImageLaunchPermissionInput) SetImageId(v string) *CancelImageLaunchPermissionInput { + s.ImageId = &v + return s +} + +type CancelImageLaunchPermissionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelImageLaunchPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelImageLaunchPermissionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *CancelImageLaunchPermissionOutput) SetReturn(v bool) *CancelImageLaunchPermissionOutput { + s.Return = &v + return s +} + type CancelImportTaskInput struct { _ struct{} `type:"structure"` @@ -60936,6 +62207,19 @@ type CopyImageInput struct { // in the Amazon EC2 API Reference. ClientToken *string `type:"string"` + // Indicates whether to include your user-defined AMI tags when copying the + // AMI. + // + // The following tags will not be copied: + // + // * System tags (prefixed with aws:) + // + // * For public and shared AMIs, user-defined tags that are attached by other + // Amazon Web Services accounts + // + // Default: Your user-defined AMI tags are not copied. + CopyImageTags *bool `type:"boolean"` + // A description for the new AMI in the destination Region. Description *string `type:"string"` @@ -60945,8 +62229,8 @@ type CopyImageInput struct { // You cannot copy an AMI from an Outpost to a Region, from one Outpost to another, // or within the same Outpost. // - // For more information, see Copying AMIs from an Amazon Web Services Region - // to an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-amis) + // For more information, see Copy AMIs from an Amazon Web Services Region to + // an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-amis) // in the Amazon Elastic Compute Cloud User Guide. DestinationOutpostArn *string `type:"string"` @@ -60960,7 +62244,7 @@ type CopyImageInput struct { // encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot // create an unencrypted copy of an encrypted snapshot. The default KMS key // for Amazon EBS is used unless you specify a non-default Key Management Service - // (KMS) KMS key using KmsKeyId. For more information, see Amazon EBS Encryption + // (KMS) KMS key using KmsKeyId. 
For more information, see Amazon EBS encryption // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` @@ -61048,6 +62332,12 @@ func (s *CopyImageInput) SetClientToken(v string) *CopyImageInput { return s } +// SetCopyImageTags sets the CopyImageTags field's value. +func (s *CopyImageInput) SetCopyImageTags(v bool) *CopyImageInput { + s.CopyImageTags = &v + return s +} + // SetDescription sets the Description field's value. func (s *CopyImageInput) SetDescription(v string) *CopyImageInput { s.Description = &v @@ -62743,7 +64033,7 @@ func (s *CreateCoipPoolInput) SetTagSpecifications(v []*TagSpecification) *Creat type CreateCoipPoolOutput struct { _ struct{} `type:"structure"` - // Describes a customer-owned address pool. + // Information about the CoIP address pool. CoipPool *CoipPool `locationName:"coipPool" type:"structure"` } @@ -63762,10 +65052,12 @@ type CreateFlowLogsInput struct { LogDestinationType *string `type:"string" enum:"LogDestinationType"` // The fields to include in the flow log record. List the fields in the order - // in which they should appear. For more information about the available fields, - // see Flow log records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records). - // If you omit this parameter, the flow log is created using the default format. - // If you specify this parameter, you must include at least one field. + // in which they should appear. If you omit this parameter, the flow log is + // created using the default format. If you specify this parameter, you must + // include at least one field. For more information about the available fields, + // see Flow log records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) + // in the Amazon VPC User Guide or Transit Gateway Flow Log records (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-flow-logs.html#flow-log-records) + // in the Amazon Web Services Transit Gateway Guide. // // Specify the fields using the ${field-id} format, separated by spaces. For // the CLI, surround this parameter value with single quotes on Linux or double @@ -63779,8 +65071,9 @@ type CreateFlowLogsInput struct { LogGroupName *string `type:"string"` // The maximum interval of time during which a flow of packets is captured and - // aggregated into a flow log record. You can specify 60 seconds (1 minute) - // or 600 seconds (10 minutes). + // aggregated into a flow log record. The possible values are 60 seconds (1 + // minute) or 600 seconds (10 minutes). This parameter must be 60 seconds for + // transit gateway resource types. // // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), // the aggregation interval is always 60 seconds or less, regardless of the @@ -63792,7 +65085,8 @@ type CreateFlowLogsInput struct { // The IDs of the resources to monitor. For example, if the resource type is // VPC, specify the IDs of the VPCs. // - // Constraints: Maximum of 1000 resources + // Constraints: Maximum of 25 for transit gateway resource types. Maximum of + // 1000 for the other resource types. 
// // ResourceIds is a required field ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` @@ -63806,7 +65100,8 @@ type CreateFlowLogsInput struct { TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The type of traffic to monitor (accepted traffic, rejected traffic, or all - // traffic). + // traffic). This parameter is not supported for transit gateway resource types. + // It is required for the other resource types. TrafficType *string `type:"string" enum:"TrafficType"` } @@ -65794,7 +67089,7 @@ func (s *CreateLocalGatewayRouteTableInput) SetTagSpecifications(v []*TagSpecifi type CreateLocalGatewayRouteTableOutput struct { _ struct{} `type:"structure"` - // Describes a local gateway route table. + // Information about the local gateway route table. LocalGatewayRouteTable *LocalGatewayRouteTable `locationName:"localGatewayRouteTable" type:"structure"` } @@ -65907,8 +67202,7 @@ func (s *CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) SetT type CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput struct { _ struct{} `type:"structure"` - // Describes an association between a local gateway route table and a virtual - // interface group. + // Information about the local gateway route table virtual interface group association. LocalGatewayRouteTableVirtualInterfaceGroupAssociation *LocalGatewayRouteTableVirtualInterfaceGroupAssociation `locationName:"localGatewayRouteTableVirtualInterfaceGroupAssociation" type:"structure"` } @@ -66234,6 +67528,10 @@ type CreateNatGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The private IPv4 address to assign to the NAT gateway. If you don't provide + // an address, a private IPv4 address will be automatically assigned. + PrivateIpAddress *string `type:"string"` + // The subnet in which to create the NAT gateway. // // SubnetId is a required field @@ -66298,6 +67596,12 @@ func (s *CreateNatGatewayInput) SetDryRun(v bool) *CreateNatGatewayInput { return s } +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *CreateNatGatewayInput) SetPrivateIpAddress(v string) *CreateNatGatewayInput { + s.PrivateIpAddress = &v + return s +} + // SetSubnetId sets the SubnetId field's value. func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput { s.SubnetId = &v @@ -67396,7 +68700,7 @@ func (s *CreatePlacementGroupInput) SetTagSpecifications(v []*TagSpecification) type CreatePlacementGroupOutput struct { _ struct{} `type:"structure"` - // Describes a placement group. + // Information about the placement group. PlacementGroup *PlacementGroup `locationName:"placementGroup" type:"structure"` } @@ -67510,20 +68814,38 @@ type CreateReplaceRootVolumeTaskInput struct { // Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` + // Indicates whether to automatically delete the original root volume after + // the root volume replacement task completes. To delete the original root volume, + // specify true. If you choose to keep the original root volume after the replacement + // task completes, you must manually delete it when you no longer need it. + DeleteReplacedRootVolume *bool `type:"boolean"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. 
If you have // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The ID of the AMI to use to restore the root volume. The specified AMI must + // have the same product code, billing information, architecture type, and virtualization + // type as that of the instance. + // + // If you want to restore the replacement volume from a specific snapshot, or + // if you want to restore it to its launch state, omit this parameter. + ImageId *string `type:"string"` + // The ID of the instance for which to replace the root volume. // // InstanceId is a required field InstanceId *string `type:"string" required:"true"` // The ID of the snapshot from which to restore the replacement root volume. - // If you want to restore the volume to the initial launch state, omit this - // parameter. + // The specified snapshot must be a snapshot that you previously created from + // the original root volume. + // + // If you want to restore the replacement root volume to the initial launch + // state, or if you want to restore the replacement root volume from an AMI, + // omit this parameter. SnapshotId *string `type:"string"` // The tags to apply to the root volume replacement task. @@ -67567,12 +68889,24 @@ func (s *CreateReplaceRootVolumeTaskInput) SetClientToken(v string) *CreateRepla return s } +// SetDeleteReplacedRootVolume sets the DeleteReplacedRootVolume field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetDeleteReplacedRootVolume(v bool) *CreateReplaceRootVolumeTaskInput { + s.DeleteReplacedRootVolume = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateReplaceRootVolumeTaskInput) SetDryRun(v bool) *CreateReplaceRootVolumeTaskInput { s.DryRun = &v return s } +// SetImageId sets the ImageId field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetImageId(v string) *CreateReplaceRootVolumeTaskInput { + s.ImageId = &v + return s +} + // SetInstanceId sets the InstanceId field's value. func (s *CreateReplaceRootVolumeTaskInput) SetInstanceId(v string) *CreateReplaceRootVolumeTaskInput { s.InstanceId = &v @@ -73040,6 +74374,182 @@ func (s *CustomerGateway) SetType(v string) *CustomerGateway { return s } +// A query used for retrieving network health data. +type DataQuery struct { + _ struct{} `type:"structure"` + + // The Region or Availability Zone that's the target for the data query. For + // example, eu-north-1. + Destination *string `type:"string"` + + // A user-defined ID associated with a data query that's returned in the dataResponse + // identifying the query. For example, if you set the Id to MyQuery01in the + // query, the dataResponse identifies the query as MyQuery01. + Id *string `type:"string"` + + // The aggregation metric used for the data query. Currently only aggregation-latency + // is supported, indicating network latency. + Metric *string `type:"string" enum:"MetricType"` + + // The aggregation period used for the data query. + Period *string `type:"string" enum:"PeriodType"` + + // The Region or Availability Zone that's the source for the data query. For + // example, us-east-1. + Source *string `type:"string"` + + // Metric data aggregations over specified periods of time. The following are + // the supported Infrastructure Performance statistics: + // + // * p50 - The median value of the metric aggregated over a specified start + // and end time. 
For example, a metric of five_minutes is the median of all + // the data points gathered within those five minutes. + Statistic *string `type:"string" enum:"StatisticType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataQuery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataQuery) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *DataQuery) SetDestination(v string) *DataQuery { + s.Destination = &v + return s +} + +// SetId sets the Id field's value. +func (s *DataQuery) SetId(v string) *DataQuery { + s.Id = &v + return s +} + +// SetMetric sets the Metric field's value. +func (s *DataQuery) SetMetric(v string) *DataQuery { + s.Metric = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *DataQuery) SetPeriod(v string) *DataQuery { + s.Period = &v + return s +} + +// SetSource sets the Source field's value. +func (s *DataQuery) SetSource(v string) *DataQuery { + s.Source = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *DataQuery) SetStatistic(v string) *DataQuery { + s.Statistic = &v + return s +} + +// The response to a DataQuery. +type DataResponse struct { + _ struct{} `type:"structure"` + + // The Region or Availability Zone that's the destination for the data query. + // For example, eu-west-1. + Destination *string `locationName:"destination" type:"string"` + + // The ID passed in the DataQuery. + Id *string `locationName:"id" type:"string"` + + // The metric used for the network performance request. Currently only aggregate-latency + // is supported, showing network latency during a specified period. + Metric *string `locationName:"metric" type:"string" enum:"MetricType"` + + // A list of MetricPoint objects. + MetricPoints []*MetricPoint `locationName:"metricPointSet" locationNameList:"item" type:"list"` + + // The period used for the network performance request. + Period *string `locationName:"period" type:"string" enum:"PeriodType"` + + // The Region or Availability Zone that's the source for the data query. For + // example, us-east-1. + Source *string `locationName:"source" type:"string"` + + // The statistic used for the network performance request. + Statistic *string `locationName:"statistic" type:"string" enum:"StatisticType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataResponse) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. 
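+//
+// A minimal sketch of building a DataQuery and sending it with
+// GetAwsNetworkPerformanceData, assuming client is an existing *ec2.EC2 service
+// client; the DataQueries field name on the input is an assumption here, and the
+// Region names, metric, and statistic strings are illustrative values based on
+// the descriptions above:
+//
+//	query := (&ec2.DataQuery{}).
+//	    SetId("MyQuery01").
+//	    SetSource("us-east-1").
+//	    SetDestination("eu-north-1").
+//	    SetMetric("aggregate-latency").
+//	    SetStatistic("p50")
+//	resp, err := client.GetAwsNetworkPerformanceData(&ec2.GetAwsNetworkPerformanceDataInput{
+//	    DataQueries: []*ec2.DataQuery{query}, // assumed field name
+//	})
+//	if err == nil {
+//	    fmt.Println(resp)
+//	}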
+func (s *DataResponse) SetDestination(v string) *DataResponse { + s.Destination = &v + return s +} + +// SetId sets the Id field's value. +func (s *DataResponse) SetId(v string) *DataResponse { + s.Id = &v + return s +} + +// SetMetric sets the Metric field's value. +func (s *DataResponse) SetMetric(v string) *DataResponse { + s.Metric = &v + return s +} + +// SetMetricPoints sets the MetricPoints field's value. +func (s *DataResponse) SetMetricPoints(v []*MetricPoint) *DataResponse { + s.MetricPoints = v + return s +} + +// SetPeriod sets the Period field's value. +func (s *DataResponse) SetPeriod(v string) *DataResponse { + s.Period = &v + return s +} + +// SetSource sets the Source field's value. +func (s *DataResponse) SetSource(v string) *DataResponse { + s.Source = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *DataResponse) SetStatistic(v string) *DataResponse { + s.Statistic = &v + return s +} + type DeleteCarrierGatewayInput struct { _ struct{} `type:"structure"` @@ -73494,7 +75004,7 @@ func (s *DeleteCoipPoolInput) SetDryRun(v bool) *DeleteCoipPoolInput { type DeleteCoipPoolOutput struct { _ struct{} `type:"structure"` - // Describes a customer-owned address pool. + // Information about the CoIP address pool. CoipPool *CoipPool `locationName:"coipPool" type:"structure"` } @@ -75248,7 +76758,7 @@ func (s *DeleteLocalGatewayRouteTableInput) SetLocalGatewayRouteTableId(v string type DeleteLocalGatewayRouteTableOutput struct { _ struct{} `type:"structure"` - // Describes a local gateway route table. + // Information about the local gateway route table. LocalGatewayRouteTable *LocalGatewayRouteTable `locationName:"localGatewayRouteTable" type:"structure"` } @@ -75337,8 +76847,7 @@ func (s *DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) SetL type DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput struct { _ struct{} `type:"structure"` - // Describes an association between a local gateway route table and a virtual - // interface group. + // Information about the association. LocalGatewayRouteTableVirtualInterfaceGroupAssociation *LocalGatewayRouteTableVirtualInterfaceGroupAssociation `locationName:"localGatewayRouteTableVirtualInterfaceGroupAssociation" type:"structure"` } @@ -80228,6 +81737,122 @@ func (s *DescribeAccountAttributesOutput) SetAccountAttributes(v []*AccountAttri return s } +type DescribeAddressTransfersInput struct { + _ struct{} `type:"structure"` + + // The allocation IDs of Elastic IP addresses. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of address transfers to return in one page of results. + MaxResults *int64 `min:"5" type:"integer"` + + // Specify the pagination token from a previous request to retrieve the next + // page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
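+//
+// A minimal sketch of calling DescribeAddressTransfers with the input defined
+// above, assuming client is an existing *ec2.EC2 service client and the
+// allocation ID is a placeholder:
+//
+//	out, err := client.DescribeAddressTransfers(&ec2.DescribeAddressTransfersInput{
+//	    AllocationIds: []*string{aws.String("eipalloc-0123456789abcdef0")},
+//	})
+//	if err == nil {
+//	    for _, transfer := range out.AddressTransfers {
+//	        fmt.Println(aws.StringValue(transfer.AllocationId),
+//	            aws.StringValue(transfer.AddressTransferStatus))
+//	    }
+//	}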
+func (s DescribeAddressTransfersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAddressTransfersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAddressTransfersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAddressTransfersInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllocationIds sets the AllocationIds field's value. +func (s *DescribeAddressTransfersInput) SetAllocationIds(v []*string) *DescribeAddressTransfersInput { + s.AllocationIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAddressTransfersInput) SetDryRun(v bool) *DescribeAddressTransfersInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAddressTransfersInput) SetMaxResults(v int64) *DescribeAddressTransfersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAddressTransfersInput) SetNextToken(v string) *DescribeAddressTransfersInput { + s.NextToken = &v + return s +} + +type DescribeAddressTransfersOutput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address transfer. + AddressTransfers []*AddressTransfer `locationName:"addressTransferSet" locationNameList:"item" type:"list"` + + // Specify the pagination token from a previous request to retrieve the next + // page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAddressTransfersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAddressTransfersOutput) GoString() string { + return s.String() +} + +// SetAddressTransfers sets the AddressTransfers field's value. +func (s *DescribeAddressTransfersOutput) SetAddressTransfers(v []*AddressTransfer) *DescribeAddressTransfersOutput { + s.AddressTransfers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAddressTransfersOutput) SetNextToken(v string) *DescribeAddressTransfersOutput { + s.NextToken = &v + return s +} + type DescribeAddressesAttributeInput struct { _ struct{} `type:"structure"` @@ -80694,6 +82319,109 @@ func (s *DescribeAvailabilityZonesOutput) SetAvailabilityZones(v []*Availability return s } +type DescribeAwsNetworkPerformanceMetricSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAwsNetworkPerformanceMetricSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAwsNetworkPerformanceMetricSubscriptionsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) SetDryRun(v bool) *DescribeAwsNetworkPerformanceMetricSubscriptionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) SetFilters(v []*Filter) *DescribeAwsNetworkPerformanceMetricSubscriptionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) SetMaxResults(v int64) *DescribeAwsNetworkPerformanceMetricSubscriptionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsInput) SetNextToken(v string) *DescribeAwsNetworkPerformanceMetricSubscriptionsInput { + s.NextToken = &v + return s +} + +type DescribeAwsNetworkPerformanceMetricSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Describes the current Infrastructure Performance subscriptions. + Subscriptions []*Subscription `locationName:"subscriptionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
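+//
+// A minimal sketch of listing the account's Infrastructure Performance metric
+// subscriptions with DescribeAwsNetworkPerformanceMetricSubscriptions, assuming
+// client is an existing *ec2.EC2 service client:
+//
+//	out, err := client.DescribeAwsNetworkPerformanceMetricSubscriptions(
+//	    &ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput{})
+//	if err == nil {
+//	    for _, sub := range out.Subscriptions {
+//	        fmt.Println(sub)
+//	    }
+//	}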
+func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) SetNextToken(v string) *DescribeAwsNetworkPerformanceMetricSubscriptionsOutput { + s.NextToken = &v + return s +} + +// SetSubscriptions sets the Subscriptions field's value. +func (s *DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) SetSubscriptions(v []*Subscription) *DescribeAwsNetworkPerformanceMetricSubscriptionsOutput { + s.Subscriptions = v + return s +} + type DescribeBundleTasksInput struct { _ struct{} `type:"structure"` @@ -85595,7 +87323,7 @@ type DescribeImagesInput struct { // * owner-id - The Amazon Web Services account ID of the owner. We recommend // that you use the Owner request parameter instead of this filter. // - // * platform - The platform. To only list Windows-based AMIs, use windows. + // * platform - The platform. The only supported value is windows. // // * product-code - The product code. // @@ -98689,9 +100417,12 @@ type DescribeVpcEndpointServicesInput struct { // One or more filters. // + // * owner - The ID or alias of the Amazon Web Services account that owns + // the service. + // // * service-name - The name of the service. // - // * service-type - The type of service (Interface | Gateway). + // * service-type - The type of service (Interface | Gateway | GatewayLoadBalancer). // // * supported-ip-address-types - The IP address type (ipv4 | ipv6). // @@ -98836,16 +100567,6 @@ type DescribeVpcEndpointsInput struct { // // * service-name - The name of the service. // - // * vpc-id - The ID of the VPC in which the endpoint resides. - // - // * vpc-endpoint-id - The ID of the endpoint. - // - // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | - // pending | available | deleting | deleted | rejected | failed). - // - // * vpc-endpoint-type - The type of VPC endpoint (Interface | Gateway | - // GatewayLoadBalancer). - // // * tag: - The key/value combination of a tag assigned to the resource. // Use the tag key in the filter name and the tag value as the filter value. // For example, to find all resources that have a tag with the key Owner @@ -98855,6 +100576,16 @@ type DescribeVpcEndpointsInput struct { // * tag-key - The key of a tag assigned to the resource. Use this filter // to find all resources assigned a tag with a specific key, regardless of // the tag value. + // + // * vpc-id - The ID of the VPC in which the endpoint resides. + // + // * vpc-endpoint-id - The ID of the endpoint. + // + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). + // + // * vpc-endpoint-type - The type of VPC endpoint (Interface | Gateway | + // GatewayLoadBalancer). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. The request returns @@ -100294,6 +102025,198 @@ func (s *DirectoryServiceAuthenticationRequest) SetDirectoryId(v string) *Direct return s } +type DisableAddressTransferInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address. + // + // AllocationId is a required field + AllocationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAddressTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAddressTransferInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableAddressTransferInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAddressTransferInput"} + if s.AllocationId == nil { + invalidParams.Add(request.NewErrParamRequired("AllocationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllocationId sets the AllocationId field's value. +func (s *DisableAddressTransferInput) SetAllocationId(v string) *DisableAddressTransferInput { + s.AllocationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableAddressTransferInput) SetDryRun(v bool) *DisableAddressTransferInput { + s.DryRun = &v + return s +} + +type DisableAddressTransferOutput struct { + _ struct{} `type:"structure"` + + // An Elastic IP address transfer. + AddressTransfer *AddressTransfer `locationName:"addressTransfer" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAddressTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAddressTransferOutput) GoString() string { + return s.String() +} + +// SetAddressTransfer sets the AddressTransfer field's value. +func (s *DisableAddressTransferOutput) SetAddressTransfer(v *AddressTransfer) *DisableAddressTransferOutput { + s.AddressTransfer = v + return s +} + +type DisableAwsNetworkPerformanceMetricSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The target Region or Availability Zone that the metric subscription is disabled + // for. For example, eu-north-1. + Destination *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The metric used for the disabled subscription. + Metric *string `type:"string" enum:"MetricType"` + + // The source Region or Availability Zone that the metric subscription is disabled + // for. For example, us-east-1. + Source *string `type:"string"` + + // The statistic used for the disabled subscription. 
+ Statistic *string `type:"string" enum:"StatisticType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAwsNetworkPerformanceMetricSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAwsNetworkPerformanceMetricSubscriptionInput) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionInput) SetDestination(v string) *DisableAwsNetworkPerformanceMetricSubscriptionInput { + s.Destination = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionInput) SetDryRun(v bool) *DisableAwsNetworkPerformanceMetricSubscriptionInput { + s.DryRun = &v + return s +} + +// SetMetric sets the Metric field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionInput) SetMetric(v string) *DisableAwsNetworkPerformanceMetricSubscriptionInput { + s.Metric = &v + return s +} + +// SetSource sets the Source field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionInput) SetSource(v string) *DisableAwsNetworkPerformanceMetricSubscriptionInput { + s.Source = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionInput) SetStatistic(v string) *DisableAwsNetworkPerformanceMetricSubscriptionInput { + s.Statistic = &v + return s +} + +type DisableAwsNetworkPerformanceMetricSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the unsubscribe action was successful. + Output *bool `locationName:"output" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAwsNetworkPerformanceMetricSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAwsNetworkPerformanceMetricSubscriptionOutput) GoString() string { + return s.String() +} + +// SetOutput sets the Output field's value. +func (s *DisableAwsNetworkPerformanceMetricSubscriptionOutput) SetOutput(v bool) *DisableAwsNetworkPerformanceMetricSubscriptionOutput { + s.Output = &v + return s +} + type DisableEbsEncryptionByDefaultInput struct { _ struct{} `type:"structure"` @@ -103970,6 +105893,302 @@ func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorA return s } +// ENA Express uses Amazon Web Services Scalable Reliable Datagram (SRD) technology +// to increase the maximum bandwidth used per stream and minimize tail latency +// of network traffic between EC2 instances. 
With ENA Express, you can communicate +// between two EC2 instances in the same subnet within the same account, or +// in different accounts. Both sending and receiving instances must have ENA +// Express enabled. +// +// To improve the reliability of network packet delivery, ENA Express reorders +// network packets on the receiving end by default. However, some UDP-based +// applications are designed to handle network packets that are out of order +// to reduce the overhead for packet delivery at the network layer. When ENA +// Express is enabled, you can specify whether UDP network traffic uses it. +type EnaSrdSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether ENA Express is enabled for the network interface. + EnaSrdEnabled *bool `type:"boolean"` + + // Configures ENA Express for UDP network traffic. + EnaSrdUdpSpecification *EnaSrdUdpSpecification `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnaSrdSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnaSrdSpecification) GoString() string { + return s.String() +} + +// SetEnaSrdEnabled sets the EnaSrdEnabled field's value. +func (s *EnaSrdSpecification) SetEnaSrdEnabled(v bool) *EnaSrdSpecification { + s.EnaSrdEnabled = &v + return s +} + +// SetEnaSrdUdpSpecification sets the EnaSrdUdpSpecification field's value. +func (s *EnaSrdSpecification) SetEnaSrdUdpSpecification(v *EnaSrdUdpSpecification) *EnaSrdSpecification { + s.EnaSrdUdpSpecification = v + return s +} + +// ENA Express is compatible with both TCP and UDP transport protocols. When +// it’s enabled, TCP traffic automatically uses it. However, some UDP-based +// applications are designed to handle network packets that are out of order, +// without a need for retransmission, such as live video broadcasting or other +// near-real-time applications. For UDP traffic, you can specify whether to +// use ENA Express, based on your application environment needs. +type EnaSrdUdpSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether UDP traffic uses ENA Express. To specify this setting, + // you must first enable ENA Express. + EnaSrdUdpEnabled *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnaSrdUdpSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnaSrdUdpSpecification) GoString() string { + return s.String() +} + +// SetEnaSrdUdpEnabled sets the EnaSrdUdpEnabled field's value. 
+func (s *EnaSrdUdpSpecification) SetEnaSrdUdpEnabled(v bool) *EnaSrdUdpSpecification { + s.EnaSrdUdpEnabled = &v + return s +} + +type EnableAddressTransferInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address. + // + // AllocationId is a required field + AllocationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the account that you want to transfer the Elastic IP address to. + // + // TransferAccountId is a required field + TransferAccountId *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAddressTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAddressTransferInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableAddressTransferInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableAddressTransferInput"} + if s.AllocationId == nil { + invalidParams.Add(request.NewErrParamRequired("AllocationId")) + } + if s.TransferAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("TransferAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllocationId sets the AllocationId field's value. +func (s *EnableAddressTransferInput) SetAllocationId(v string) *EnableAddressTransferInput { + s.AllocationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableAddressTransferInput) SetDryRun(v bool) *EnableAddressTransferInput { + s.DryRun = &v + return s +} + +// SetTransferAccountId sets the TransferAccountId field's value. +func (s *EnableAddressTransferInput) SetTransferAccountId(v string) *EnableAddressTransferInput { + s.TransferAccountId = &v + return s +} + +type EnableAddressTransferOutput struct { + _ struct{} `type:"structure"` + + // An Elastic IP address transfer. + AddressTransfer *AddressTransfer `locationName:"addressTransfer" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAddressTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAddressTransferOutput) GoString() string { + return s.String() +} + +// SetAddressTransfer sets the AddressTransfer field's value. 
+func (s *EnableAddressTransferOutput) SetAddressTransfer(v *AddressTransfer) *EnableAddressTransferOutput { + s.AddressTransfer = v + return s +} + +type EnableAwsNetworkPerformanceMetricSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The target Region or Availability Zone that the metric subscription is enabled + // for. For example, eu-west-1. + Destination *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The metric used for the enabled subscription. + Metric *string `type:"string" enum:"MetricType"` + + // The source Region or Availability Zone that the metric subscription is enabled + // for. For example, us-east-1. + Source *string `type:"string"` + + // The statistic used for the enabled subscription. + Statistic *string `type:"string" enum:"StatisticType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAwsNetworkPerformanceMetricSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAwsNetworkPerformanceMetricSubscriptionInput) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionInput) SetDestination(v string) *EnableAwsNetworkPerformanceMetricSubscriptionInput { + s.Destination = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionInput) SetDryRun(v bool) *EnableAwsNetworkPerformanceMetricSubscriptionInput { + s.DryRun = &v + return s +} + +// SetMetric sets the Metric field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionInput) SetMetric(v string) *EnableAwsNetworkPerformanceMetricSubscriptionInput { + s.Metric = &v + return s +} + +// SetSource sets the Source field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionInput) SetSource(v string) *EnableAwsNetworkPerformanceMetricSubscriptionInput { + s.Source = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionInput) SetStatistic(v string) *EnableAwsNetworkPerformanceMetricSubscriptionInput { + s.Statistic = &v + return s +} + +type EnableAwsNetworkPerformanceMetricSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the subscribe action was successful. + Output *bool `locationName:"output" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAwsNetworkPerformanceMetricSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAwsNetworkPerformanceMetricSubscriptionOutput) GoString() string { + return s.String() +} + +// SetOutput sets the Output field's value. +func (s *EnableAwsNetworkPerformanceMetricSubscriptionOutput) SetOutput(v bool) *EnableAwsNetworkPerformanceMetricSubscriptionOutput { + s.Output = &v + return s +} + type EnableEbsEncryptionByDefaultInput struct { _ struct{} `type:"structure"` @@ -104627,7 +106846,8 @@ type EnableImageDeprecationInput struct { // the seconds to the nearest minute. // // You can’t specify a date in the past. The upper limit for DeprecateAt is - // 10 years from now. + // 10 years from now, except for public AMIs, where the upper limit is 2 years + // from the creation date. // // DeprecateAt is a required field DeprecateAt *time.Time `type:"timestamp" required:"true"` @@ -104816,6 +107036,66 @@ func (s *EnableIpamOrganizationAdminAccountOutput) SetSuccess(v bool) *EnableIpa return s } +type EnableReachabilityAnalyzerOrganizationSharingInput struct { + _ struct{} `type:"structure"` + + DryRun *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableReachabilityAnalyzerOrganizationSharingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableReachabilityAnalyzerOrganizationSharingInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableReachabilityAnalyzerOrganizationSharingInput) SetDryRun(v bool) *EnableReachabilityAnalyzerOrganizationSharingInput { + s.DryRun = &v + return s +} + +type EnableReachabilityAnalyzerOrganizationSharingOutput struct { + _ struct{} `type:"structure"` + + ReturnValue *bool `locationName:"returnValue" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableReachabilityAnalyzerOrganizationSharingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableReachabilityAnalyzerOrganizationSharingOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. 
+func (s *EnableReachabilityAnalyzerOrganizationSharingOutput) SetReturnValue(v bool) *EnableReachabilityAnalyzerOrganizationSharingOutput { + s.ReturnValue = &v + return s +} + type EnableSerialConsoleAccessInput struct { _ struct{} `type:"structure"` @@ -108157,7 +110437,7 @@ func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *Fl // // For information about launch templates, see Launch an instance from a launch // template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. type FleetLaunchTemplateSpecification struct { _ struct{} `type:"structure"` @@ -109304,6 +111584,129 @@ func (s *GetAssociatedIpv6PoolCidrsOutput) SetNextToken(v string) *GetAssociated return s } +type GetAwsNetworkPerformanceDataInput struct { + _ struct{} `type:"structure"` + + // A list of network performance data queries. + DataQueries []*DataQuery `locationName:"DataQuery" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ending time for the performance data request. The end time must be formatted + // as yyyy-mm-ddThh:mm:ss. For example, 2022-06-12T12:00:00.000Z. + EndTime *time.Time `type:"timestamp"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The starting time for the performance data request. The starting time must + // be formatted as yyyy-mm-ddThh:mm:ss. For example, 2022-06-10T12:00:00.000Z. + StartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAwsNetworkPerformanceDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAwsNetworkPerformanceDataInput) GoString() string { + return s.String() +} + +// SetDataQueries sets the DataQueries field's value. +func (s *GetAwsNetworkPerformanceDataInput) SetDataQueries(v []*DataQuery) *GetAwsNetworkPerformanceDataInput { + s.DataQueries = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAwsNetworkPerformanceDataInput) SetDryRun(v bool) *GetAwsNetworkPerformanceDataInput { + s.DryRun = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *GetAwsNetworkPerformanceDataInput) SetEndTime(v time.Time) *GetAwsNetworkPerformanceDataInput { + s.EndTime = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetAwsNetworkPerformanceDataInput) SetMaxResults(v int64) *GetAwsNetworkPerformanceDataInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetAwsNetworkPerformanceDataInput) SetNextToken(v string) *GetAwsNetworkPerformanceDataInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetAwsNetworkPerformanceDataInput) SetStartTime(v time.Time) *GetAwsNetworkPerformanceDataInput { + s.StartTime = &v + return s +} + +type GetAwsNetworkPerformanceDataOutput struct { + _ struct{} `type:"structure"` + + // The list of data responses. + DataResponses []*DataResponse `locationName:"dataResponseSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAwsNetworkPerformanceDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAwsNetworkPerformanceDataOutput) GoString() string { + return s.String() +} + +// SetDataResponses sets the DataResponses field's value. +func (s *GetAwsNetworkPerformanceDataOutput) SetDataResponses(v []*DataResponse) *GetAwsNetworkPerformanceDataOutput { + s.DataResponses = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAwsNetworkPerformanceDataOutput) SetNextToken(v string) *GetAwsNetworkPerformanceDataOutput { + s.NextToken = &v + return s +} + type GetCapacityReservationUsageInput struct { _ struct{} `type:"structure"` @@ -111173,7 +113576,7 @@ type GetIpamResourceCidrsInput struct { // The ID of the Amazon Web Services account that owns the resource. ResourceOwner *string `type:"string"` - // A tag on an IPAM resource. + // The resource tag. ResourceTag *RequestIpamResourceTag `type:"structure"` // The resource type. @@ -115058,7 +117461,7 @@ type Image struct { Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` // The platform details associated with the billing code of the AMI. For more - // information, see Understanding AMI billing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) + // information, see Understand AMI billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) // in the Amazon Elastic Compute Cloud User Guide. PlatformDetails *string `locationName:"platformDetails" type:"string"` @@ -119731,6 +122134,16 @@ func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivat // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. // +// To limit the list of instance types from which Amazon EC2 can identify matching +// instance types, you can use one of the following parameters, but not both +// in the same request: +// +// - AllowedInstanceTypes - The instance types to include in the list. All +// other instance types are ignored, even if they match your specified attributes. 
+// +// - ExcludedInstanceTypes - The instance types to exclude from the list, +// even if they match your specified attributes. +// // You must specify VCpuCount and MemoryMiB. All other attributes are optional. // Any unspecified optional attribute is set to its default. // @@ -119803,6 +122216,23 @@ type InstanceRequirements struct { // Default: Any accelerator type AcceleratorTypes []*string `locationName:"acceleratorTypeSet" locationNameList:"item" type:"list" enum:"AcceleratorType"` + // The instance types to apply your specified attributes against. All other + // instance types are ignored, even if they match your specified attributes. + // + // You can use strings with one or more wild cards, represented by an asterisk + // (*), to allow an instance type, size, or generation. The following are examples: + // m5.8xlarge, c5*.*, m5a.*, r*, *3*. + // + // For example, if you specify c5*,Amazon EC2 will allow the entire C5 instance + // family, which includes all C5a and C5n instance types. If you specify m5a.*, + // Amazon EC2 will allow all the M5a instance types, but not the M5n instance + // types. + // + // If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. + // + // Default: All instance types + AllowedInstanceTypes []*string `locationName:"allowedInstanceTypeSet" locationNameList:"item" type:"list"` + // Indicates whether bare metal instance types must be included, excluded, or // required. // @@ -119860,6 +122290,8 @@ type InstanceRequirements struct { // Amazon EC2 will exclude all the M5a instance types, but not the M5n instance // types. // + // If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes. + // // Default: No excluded instance types ExcludedInstanceTypes []*string `locationName:"excludedInstanceTypeSet" locationNameList:"item" type:"list"` @@ -119907,6 +122339,12 @@ type InstanceRequirements struct { // The minimum and maximum amount of memory, in MiB. MemoryMiB *MemoryMiB `locationName:"memoryMiB" type:"structure"` + // The minimum and maximum amount of network bandwidth, in gigabits per second + // (Gbps). + // + // Default: No minimum or maximum limits + NetworkBandwidthGbps *NetworkBandwidthGbps `locationName:"networkBandwidthGbps" type:"structure"` + // The minimum and maximum number of network interfaces. // // Default: No minimum or maximum limits @@ -120016,6 +122454,12 @@ func (s *InstanceRequirements) SetAcceleratorTypes(v []*string) *InstanceRequire return s } +// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value. +func (s *InstanceRequirements) SetAllowedInstanceTypes(v []*string) *InstanceRequirements { + s.AllowedInstanceTypes = v + return s +} + // SetBareMetal sets the BareMetal field's value. func (s *InstanceRequirements) SetBareMetal(v string) *InstanceRequirements { s.BareMetal = &v @@ -120076,6 +122520,12 @@ func (s *InstanceRequirements) SetMemoryMiB(v *MemoryMiB) *InstanceRequirements return s } +// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value. +func (s *InstanceRequirements) SetNetworkBandwidthGbps(v *NetworkBandwidthGbps) *InstanceRequirements { + s.NetworkBandwidthGbps = v + return s +} + // SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value. func (s *InstanceRequirements) SetNetworkInterfaceCount(v *NetworkInterfaceCount) *InstanceRequirements { s.NetworkInterfaceCount = v @@ -120119,6 +122569,16 @@ func (s *InstanceRequirements) SetVCpuCount(v *VCpuCountRange) *InstanceRequirem // all of the specified attributes. 
If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. // +// To limit the list of instance types from which Amazon EC2 can identify matching +// instance types, you can use one of the following parameters, but not both +// in the same request: +// +// - AllowedInstanceTypes - The instance types to include in the list. All +// other instance types are ignored, even if they match your specified attributes. +// +// - ExcludedInstanceTypes - The instance types to exclude from the list, +// even if they match your specified attributes. +// // You must specify VCpuCount and MemoryMiB. All other attributes are optional. // Any unspecified optional attribute is set to its default. // @@ -120191,6 +122651,23 @@ type InstanceRequirementsRequest struct { // Default: Any accelerator type AcceleratorTypes []*string `locationName:"AcceleratorType" locationNameList:"item" type:"list" enum:"AcceleratorType"` + // The instance types to apply your specified attributes against. All other + // instance types are ignored, even if they match your specified attributes. + // + // You can use strings with one or more wild cards, represented by an asterisk + // (*), to allow an instance type, size, or generation. The following are examples: + // m5.8xlarge, c5*.*, m5a.*, r*, *3*. + // + // For example, if you specify c5*,Amazon EC2 will allow the entire C5 instance + // family, which includes all C5a and C5n instance types. If you specify m5a.*, + // Amazon EC2 will allow all the M5a instance types, but not the M5n instance + // types. + // + // If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. + // + // Default: All instance types + AllowedInstanceTypes []*string `locationName:"AllowedInstanceType" locationNameList:"item" type:"list"` + // Indicates whether bare metal instance types must be included, excluded, or // required. // @@ -120248,6 +122725,8 @@ type InstanceRequirementsRequest struct { // Amazon EC2 will exclude all the M5a instance types, but not the M5n instance // types. // + // If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes. + // // Default: No excluded instance types ExcludedInstanceTypes []*string `locationName:"ExcludedInstanceType" locationNameList:"item" type:"list"` @@ -120297,6 +122776,12 @@ type InstanceRequirementsRequest struct { // MemoryMiB is a required field MemoryMiB *MemoryMiBRequest `type:"structure" required:"true"` + // The minimum and maximum amount of network bandwidth, in gigabits per second + // (Gbps). + // + // Default: No minimum or maximum limits + NetworkBandwidthGbps *NetworkBandwidthGbpsRequest `type:"structure"` + // The minimum and maximum number of network interfaces. // // Default: No minimum or maximum limits @@ -120434,6 +122919,12 @@ func (s *InstanceRequirementsRequest) SetAcceleratorTypes(v []*string) *Instance return s } +// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value. +func (s *InstanceRequirementsRequest) SetAllowedInstanceTypes(v []*string) *InstanceRequirementsRequest { + s.AllowedInstanceTypes = v + return s +} + // SetBareMetal sets the BareMetal field's value. func (s *InstanceRequirementsRequest) SetBareMetal(v string) *InstanceRequirementsRequest { s.BareMetal = &v @@ -120494,6 +122985,12 @@ func (s *InstanceRequirementsRequest) SetMemoryMiB(v *MemoryMiBRequest) *Instanc return s } +// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value. 
+func (s *InstanceRequirementsRequest) SetNetworkBandwidthGbps(v *NetworkBandwidthGbpsRequest) *InstanceRequirementsRequest { + s.NetworkBandwidthGbps = v + return s +} + // SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value. func (s *InstanceRequirementsRequest) SetNetworkInterfaceCount(v *NetworkInterfaceCountRequest) *InstanceRequirementsRequest { s.NetworkInterfaceCount = v @@ -125760,6 +128257,10 @@ type LaunchTemplatePlacement struct { // The Availability Zone of the instance. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The Group ID of the placement group. You must specify the Placement Group + // Group ID to launch an instance in a shared placement group. + GroupId *string `locationName:"groupId" type:"string"` + // The name of the placement group for the instance. GroupName *string `locationName:"groupName" type:"string"` @@ -125811,6 +128312,12 @@ func (s *LaunchTemplatePlacement) SetAvailabilityZone(v string) *LaunchTemplateP return s } +// SetGroupId sets the GroupId field's value. +func (s *LaunchTemplatePlacement) SetGroupId(v string) *LaunchTemplatePlacement { + s.GroupId = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *LaunchTemplatePlacement) SetGroupName(v string) *LaunchTemplatePlacement { s.GroupName = &v @@ -125857,6 +128364,10 @@ type LaunchTemplatePlacementRequest struct { // The Availability Zone for the instance. AvailabilityZone *string `type:"string"` + // The Group Id of a placement group. You must specify the Placement Group Group + // Id to launch an instance in a shared placement group. + GroupId *string `type:"string"` + // The name of the placement group for the instance. GroupName *string `type:"string"` @@ -125910,6 +128421,12 @@ func (s *LaunchTemplatePlacementRequest) SetAvailabilityZone(v string) *LaunchTe return s } +// SetGroupId sets the GroupId field's value. +func (s *LaunchTemplatePlacementRequest) SetGroupId(v string) *LaunchTemplatePlacementRequest { + s.GroupId = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *LaunchTemplatePlacementRequest) SetGroupName(v string) *LaunchTemplatePlacementRequest { s.GroupName = &v @@ -127223,7 +129740,7 @@ type LocalGatewayRouteTable struct { // The state of the local gateway route table. State *string `locationName:"state" type:"string"` - // Describes a state change. + // Information about the state change. StateReason *StateReason `locationName:"stateReason" type:"structure"` // The tags assigned to the local gateway route table. @@ -128003,6 +130520,69 @@ func (s *MemoryMiBRequest) SetMin(v int64) *MemoryMiBRequest { return s } +// Indicates whether the network was healthy or unhealthy at a particular point. +// The value is aggregated from the startDate to the endDate. Currently only +// five_minutes is supported. +type MetricPoint struct { + _ struct{} `type:"structure"` + + // The end date for the metric point. The ending time must be formatted as yyyy-mm-ddThh:mm:ss. + // For example, 2022-06-12T12:00:00.000Z. + EndDate *time.Time `locationName:"endDate" type:"timestamp"` + + // The start date for the metric point. The starting date for the metric point. + // The starting time must be formatted as yyyy-mm-ddThh:mm:ss. For example, + // 2022-06-10T12:00:00.000Z. + StartDate *time.Time `locationName:"startDate" type:"timestamp"` + + // The status of the metric point. 
+ Status *string `locationName:"status" type:"string"` + + Value *float64 `locationName:"value" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricPoint) GoString() string { + return s.String() +} + +// SetEndDate sets the EndDate field's value. +func (s *MetricPoint) SetEndDate(v time.Time) *MetricPoint { + s.EndDate = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *MetricPoint) SetStartDate(v time.Time) *MetricPoint { + s.StartDate = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MetricPoint) SetStatus(v string) *MetricPoint { + s.Status = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetricPoint) SetValue(v float64) *MetricPoint { + s.Value = &v + return s +} + type ModifyAddressAttributeInput struct { _ struct{} `type:"structure"` @@ -130319,7 +132899,7 @@ func (s *ModifyInstanceEventStartTimeInput) SetNotBefore(v time.Time) *ModifyIns type ModifyInstanceEventStartTimeOutput struct { _ struct{} `type:"structure"` - // Describes a scheduled event for an instance. + // Information about the event. Event *InstanceStatusEvent `locationName:"event" type:"structure"` } @@ -130764,6 +133344,10 @@ type ModifyInstancePlacementInput struct { // The affinity setting for the instance. Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"` + // The Group Id of a placement group. You must specify the Placement Group Group + // Id to launch an instance in a shared placement group. + GroupId *string `type:"string"` + // The name of the placement group in which to place the instance. For spread // placement groups, the instance must have a tenancy of default. For cluster // and partition placement groups, the instance must have a tenancy of default @@ -130832,6 +133416,12 @@ func (s *ModifyInstancePlacementInput) SetAffinity(v string) *ModifyInstancePlac return s } +// SetGroupId sets the GroupId field's value. +func (s *ModifyInstancePlacementInput) SetGroupId(v string) *ModifyInstancePlacementInput { + s.GroupId = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *ModifyInstancePlacementInput) SetGroupName(v string) *ModifyInstancePlacementInput { s.GroupName = &v @@ -131330,7 +133920,7 @@ func (s *ModifyIpamResourceCidrInput) SetResourceRegion(v string) *ModifyIpamRes type ModifyIpamResourceCidrOutput struct { _ struct{} `type:"structure"` - // The CIDR for an IPAM resource. + // The CIDR of the resource. IpamResourceCidr *IpamResourceCidr `locationName:"ipamResourceCidr" type:"structure"` } @@ -131673,7 +134263,7 @@ func (s *ModifyLocalGatewayRouteInput) SetNetworkInterfaceId(v string) *ModifyLo type ModifyLocalGatewayRouteOutput struct { _ struct{} `type:"structure"` - // Describes a route for a local gateway route table. + // Information about the local gateway route table. 
Route *LocalGatewayRoute `locationName:"route" type:"structure"` } @@ -131865,7 +134455,7 @@ func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *Mod type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` - // Information about the interface attachment. If modifying the 'delete on termination' + // Information about the interface attachment. If modifying the delete on termination // attribute, you must specify the ID of the interface attachment. Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"` @@ -131878,6 +134468,10 @@ type ModifyNetworkInterfaceAttributeInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // Updates the ENA Express configuration for the network interface that’s + // attached to the instance. + EnaSrdSpecification *EnaSrdSpecification `type:"structure"` + // Changes the security groups for the network interface. The new set of groups // you specify replaces the current set. You must specify at least one group, // even if it's just the default security group in the VPC. You must specify @@ -131947,6 +134541,12 @@ func (s *ModifyNetworkInterfaceAttributeInput) SetDryRun(v bool) *ModifyNetworkI return s } +// SetEnaSrdSpecification sets the EnaSrdSpecification field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetEnaSrdSpecification(v *EnaSrdSpecification) *ModifyNetworkInterfaceAttributeInput { + s.EnaSrdSpecification = v + return s +} + // SetGroups sets the Groups field's value. func (s *ModifyNetworkInterfaceAttributeInput) SetGroups(v []*string) *ModifyNetworkInterfaceAttributeInput { s.Groups = v @@ -133512,7 +136112,7 @@ func (s *ModifyTransitGatewayOptions) SetVpnEcmpSupport(v string) *ModifyTransit type ModifyTransitGatewayOutput struct { _ struct{} `type:"structure"` - // Describes a transit gateway. + // Information about the transit gateway. TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` } @@ -135314,7 +137914,7 @@ func (s *ModifyVpnConnectionOptionsInput) SetVpnConnectionId(v string) *ModifyVp type ModifyVpnConnectionOptionsOutput struct { _ struct{} `type:"structure"` - // Describes a VPN connection. + // Information about the VPN connection. VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` } @@ -135345,7 +137945,7 @@ func (s *ModifyVpnConnectionOptionsOutput) SetVpnConnection(v *VpnConnection) *M type ModifyVpnConnectionOutput struct { _ struct{} `type:"structure"` - // Describes a VPN connection. + // Information about the VPN connection. VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` } @@ -135448,7 +138048,7 @@ func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) type ModifyVpnTunnelCertificateOutput struct { _ struct{} `type:"structure"` - // Describes a VPN connection. + // Information about the VPN connection. VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` } @@ -135565,7 +138165,7 @@ func (s *ModifyVpnTunnelOptionsInput) SetVpnTunnelOutsideIpAddress(v string) *Mo type ModifyVpnTunnelOptionsOutput struct { _ struct{} `type:"structure"` - // Describes a VPN connection. + // Information about the VPN connection. 
VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` } @@ -136184,8 +138784,7 @@ func (s *MoveByoipCidrToIpamInput) SetIpamPoolOwner(v string) *MoveByoipCidrToIp type MoveByoipCidrToIpamOutput struct { _ struct{} `type:"structure"` - // Information about an address range that is provisioned for use with your - // Amazon Web Services resources through bring your own IP addresses (BYOIP). + // The BYOIP CIDR. ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` } @@ -136724,6 +139323,108 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { return s } +// The minimum and maximum amount of network bandwidth, in gigabits per second +// (Gbps). +// +// Setting the minimum bandwidth does not guarantee that your instance will +// achieve the minimum bandwidth. Amazon EC2 will identify instance types that +// support the specified minimum bandwidth, but the actual bandwidth of your +// instance might go below the specified minimum at times. For more information, +// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth) +// in the Amazon EC2 User Guide. +type NetworkBandwidthGbps struct { + _ struct{} `type:"structure"` + + // The maximum amount of network bandwidth, in Gbps. If this parameter is not + // specified, there is no maximum limit. + Max *float64 `locationName:"max" type:"double"` + + // The minimum amount of network bandwidth, in Gbps. If this parameter is not + // specified, there is no minimum limit. + Min *float64 `locationName:"min" type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbps) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbps) GoString() string { + return s.String() +} + +// SetMax sets the Max field's value. +func (s *NetworkBandwidthGbps) SetMax(v float64) *NetworkBandwidthGbps { + s.Max = &v + return s +} + +// SetMin sets the Min field's value. +func (s *NetworkBandwidthGbps) SetMin(v float64) *NetworkBandwidthGbps { + s.Min = &v + return s +} + +// The minimum and maximum amount of network bandwidth, in gigabits per second +// (Gbps). +// +// Setting the minimum bandwidth does not guarantee that your instance will +// achieve the minimum bandwidth. Amazon EC2 will identify instance types that +// support the specified minimum bandwidth, but the actual bandwidth of your +// instance might go below the specified minimum at times. For more information, +// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth) +// in the Amazon EC2 User Guide. +type NetworkBandwidthGbpsRequest struct { + _ struct{} `type:"structure"` + + // The maximum amount of network bandwidth, in Gbps. To specify no maximum limit, + // omit this parameter. + Max *float64 `type:"double"` + + // The minimum amount of network bandwidth, in Gbps. To specify no minimum limit, + // omit this parameter. 
+ Min *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbpsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NetworkBandwidthGbpsRequest) GoString() string { + return s.String() +} + +// SetMax sets the Max field's value. +func (s *NetworkBandwidthGbpsRequest) SetMax(v float64) *NetworkBandwidthGbpsRequest { + s.Max = &v + return s +} + +// SetMin sets the Min field's value. +func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsRequest { + s.Min = &v + return s +} + // Describes the network card support of the instance type. type NetworkCardInfo struct { _ struct{} `type:"structure"` @@ -136787,6 +139488,12 @@ type NetworkInfo struct { // Indicates whether Elastic Fabric Adapter (EFA) is supported. EfaSupported *bool `locationName:"efaSupported" type:"boolean"` + // Indicates whether the instance type supports ENA Express. ENA Express uses + // Amazon Web Services Scalable Reliable Datagram (SRD) technology to increase + // the maximum bandwidth used per stream and minimize tail latency of network + // traffic between EC2 instances. + EnaSrdSupported *bool `locationName:"enaSrdSupported" type:"boolean"` + // Indicates whether Elastic Network Adapter (ENA) is supported. EnaSupport *string `locationName:"enaSupport" type:"string" enum:"EnaSupport"` @@ -136853,6 +139560,12 @@ func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { return s } +// SetEnaSrdSupported sets the EnaSrdSupported field's value. +func (s *NetworkInfo) SetEnaSrdSupported(v bool) *NetworkInfo { + s.EnaSrdSupported = &v + return s +} + // SetEnaSupport sets the EnaSupport field's value. func (s *NetworkInfo) SetEnaSupport(v string) *NetworkInfo { s.EnaSupport = &v @@ -137151,6 +139864,8 @@ func (s *NetworkInsightsAccessScopeContent) SetNetworkInsightsAccessScopeId(v st type NetworkInsightsAnalysis struct { _ struct{} `type:"structure"` + AdditionalAccounts []*string `locationName:"additionalAccountSet" locationNameList:"item" type:"list"` + // Potential intermediate components. AlternatePathHints []*AlternatePathHint `locationName:"alternatePathHintSet" locationNameList:"item" type:"list"` @@ -137189,6 +139904,8 @@ type NetworkInsightsAnalysis struct { // The status message, if the status is failed. StatusMessage *string `locationName:"statusMessage" type:"string"` + SuggestedAccounts []*string `locationName:"suggestedAccountSet" locationNameList:"item" type:"list"` + // The tags. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` @@ -137214,6 +139931,12 @@ func (s NetworkInsightsAnalysis) GoString() string { return s.String() } +// SetAdditionalAccounts sets the AdditionalAccounts field's value. +func (s *NetworkInsightsAnalysis) SetAdditionalAccounts(v []*string) *NetworkInsightsAnalysis { + s.AdditionalAccounts = v + return s +} + // SetAlternatePathHints sets the AlternatePathHints field's value. 
func (s *NetworkInsightsAnalysis) SetAlternatePathHints(v []*AlternatePathHint) *NetworkInsightsAnalysis { s.AlternatePathHints = v @@ -137286,6 +140009,12 @@ func (s *NetworkInsightsAnalysis) SetStatusMessage(v string) *NetworkInsightsAna return s } +// SetSuggestedAccounts sets the SuggestedAccounts field's value. +func (s *NetworkInsightsAnalysis) SetSuggestedAccounts(v []*string) *NetworkInsightsAnalysis { + s.SuggestedAccounts = v + return s +} + // SetTags sets the Tags field's value. func (s *NetworkInsightsAnalysis) SetTags(v []*Tag) *NetworkInsightsAnalysis { s.Tags = v @@ -137308,6 +140037,8 @@ type NetworkInsightsPath struct { // The Amazon Web Services resource that is the destination of the path. Destination *string `locationName:"destination" type:"string"` + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + // The IP address of the Amazon Web Services resource that is the destination // of the path. DestinationIp *string `locationName:"destinationIp" type:"string"` @@ -137327,6 +140058,8 @@ type NetworkInsightsPath struct { // The Amazon Web Services resource that is the source of the path. Source *string `locationName:"source" type:"string"` + SourceArn *string `locationName:"sourceArn" min:"1" type:"string"` + // The IP address of the Amazon Web Services resource that is the source of // the path. SourceIp *string `locationName:"sourceIp" type:"string"` @@ -137365,6 +140098,12 @@ func (s *NetworkInsightsPath) SetDestination(v string) *NetworkInsightsPath { return s } +// SetDestinationArn sets the DestinationArn field's value. +func (s *NetworkInsightsPath) SetDestinationArn(v string) *NetworkInsightsPath { + s.DestinationArn = &v + return s +} + // SetDestinationIp sets the DestinationIp field's value. func (s *NetworkInsightsPath) SetDestinationIp(v string) *NetworkInsightsPath { s.DestinationIp = &v @@ -137401,6 +140140,12 @@ func (s *NetworkInsightsPath) SetSource(v string) *NetworkInsightsPath { return s } +// SetSourceArn sets the SourceArn field's value. +func (s *NetworkInsightsPath) SetSourceArn(v string) *NetworkInsightsPath { + s.SourceArn = &v + return s +} + // SetSourceIp sets the SourceIp field's value. func (s *NetworkInsightsPath) SetSourceIp(v string) *NetworkInsightsPath { s.SourceIp = &v @@ -137783,6 +140528,10 @@ type NetworkInterfaceAttachment struct { // The device index of the network interface attachment on the instance. DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + // Configures ENA Express for the network interface that this action attaches + // to the instance. + EnaSrdSpecification *AttachmentEnaSrdSpecification `locationName:"enaSrdSpecification" type:"structure"` + // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` @@ -137838,6 +140587,12 @@ func (s *NetworkInterfaceAttachment) SetDeviceIndex(v int64) *NetworkInterfaceAt return s } +// SetEnaSrdSpecification sets the EnaSrdSpecification field's value. +func (s *NetworkInterfaceAttachment) SetEnaSrdSpecification(v *AttachmentEnaSrdSpecification) *NetworkInterfaceAttachment { + s.EnaSrdSpecification = v + return s +} + // SetInstanceId sets the InstanceId field's value. func (s *NetworkInterfaceAttachment) SetInstanceId(v string) *NetworkInterfaceAttachment { s.InstanceId = &v @@ -139536,11 +142291,10 @@ func (s *Phase2IntegrityAlgorithmsRequestListValue) SetValue(v string) *Phase2In type Placement struct { _ struct{} `type:"structure"` - // The affinity setting for the instance on the Dedicated Host. 
This parameter - // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) - // command. + // The affinity setting for the instance on the Dedicated Host. // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet) + // or ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html). Affinity *string `locationName:"affinity" type:"string"` // The Availability Zone of the instance. @@ -139548,46 +142302,46 @@ type Placement struct { // If not specified, an Availability Zone will be automatically chosen for you // based on the load balancing criteria for the Region. // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The name of the placement group the instance is in. + // The ID of the placement group that the instance is in. If you specify GroupId, + // you can't specify GroupName. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the placement group that the instance is in. If you specify GroupName, + // you can't specify GroupId. GroupName *string `locationName:"groupName" type:"string"` - // The ID of the Dedicated Host on which the instance resides. This parameter - // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) - // command. + // The ID of the Dedicated Host on which the instance resides. // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet) + // or ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html). HostId *string `locationName:"hostId" type:"string"` - // The ARN of the host resource group in which to launch the instances. If you - // specify a host resource group ARN, omit the Tenancy parameter or set it to - // host. + // The ARN of the host resource group in which to launch the instances. // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // If you specify this parameter, either omit the Tenancy parameter or set it + // to host. + // + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). HostResourceGroupArn *string `locationName:"hostResourceGroupArn" type:"string"` // The number of the partition that the instance is in. Valid only if the placement // group strategy is set to partition. // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). PartitionNumber *int64 `locationName:"partitionNumber" type:"integer"` // Reserved for future use. 
- // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). SpreadDomain *string `locationName:"spreadDomain" type:"string"` // The tenancy of the instance (if the instance is running in a VPC). An instance - // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy - // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) - // command. - // - // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // with a tenancy of dedicated runs on single-tenant hardware. // - // T3 instances that use the unlimited CPU credit option do not support host - // tenancy. + // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // The host tenancy is not supported for ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // or for T3 instances that are configured for the unlimited CPU credit option. Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` } @@ -139621,6 +142375,12 @@ func (s *Placement) SetAvailabilityZone(v string) *Placement { return s } +// SetGroupId sets the GroupId field's value. +func (s *Placement) SetGroupId(v string) *Placement { + s.GroupId = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *Placement) SetGroupName(v string) *Placement { s.GroupName = &v @@ -141032,7 +143792,7 @@ func (s *ProvisionPublicIpv4PoolCidrInput) SetPoolId(v string) *ProvisionPublicI type ProvisionPublicIpv4PoolCidrOutput struct { _ struct{} `type:"structure"` - // Describes an address range of an IPv4 address pool. + // Information about the address range of the public IPv4 pool. PoolAddressRange *PublicIpv4PoolRange `locationName:"poolAddressRange" type:"structure"` // The ID of the pool that you want to provision the CIDR to. @@ -142804,7 +145564,7 @@ func (s *RejectTransitGatewayMulticastDomainAssociationsInput) SetTransitGateway type RejectTransitGatewayMulticastDomainAssociationsOutput struct { _ struct{} `type:"structure"` - // Describes the multicast domain associations. + // Information about the multicast domain associations. Associations *TransitGatewayMulticastDomainAssociations `locationName:"associations" type:"structure"` } @@ -143965,12 +146725,22 @@ type ReplaceRootVolumeTask struct { // The time the task completed. CompleteTime *string `locationName:"completeTime" type:"string"` + // Indicates whether the original root volume is to be deleted after the root + // volume replacement task completes. + DeleteReplacedRootVolume *bool `locationName:"deleteReplacedRootVolume" type:"boolean"` + + // The ID of the AMI used to create the replacement root volume. + ImageId *string `locationName:"imageId" type:"string"` + // The ID of the instance for which the root volume replacement task was created. InstanceId *string `locationName:"instanceId" type:"string"` // The ID of the root volume replacement task. ReplaceRootVolumeTaskId *string `locationName:"replaceRootVolumeTaskId" type:"string"` + // The ID of the snapshot used to create the replacement root volume. + SnapshotId *string `locationName:"snapshotId" type:"string"` + // The time the task was started. 
StartTime *string `locationName:"startTime" type:"string"` @@ -144024,6 +146794,18 @@ func (s *ReplaceRootVolumeTask) SetCompleteTime(v string) *ReplaceRootVolumeTask return s } +// SetDeleteReplacedRootVolume sets the DeleteReplacedRootVolume field's value. +func (s *ReplaceRootVolumeTask) SetDeleteReplacedRootVolume(v bool) *ReplaceRootVolumeTask { + s.DeleteReplacedRootVolume = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ReplaceRootVolumeTask) SetImageId(v string) *ReplaceRootVolumeTask { + s.ImageId = &v + return s +} + // SetInstanceId sets the InstanceId field's value. func (s *ReplaceRootVolumeTask) SetInstanceId(v string) *ReplaceRootVolumeTask { s.InstanceId = &v @@ -144036,6 +146818,12 @@ func (s *ReplaceRootVolumeTask) SetReplaceRootVolumeTaskId(v string) *ReplaceRoo return s } +// SetSnapshotId sets the SnapshotId field's value. +func (s *ReplaceRootVolumeTask) SetSnapshotId(v string) *ReplaceRootVolumeTask { + s.SnapshotId = &v + return s +} + // SetStartTime sets the StartTime field's value. func (s *ReplaceRootVolumeTask) SetStartTime(v string) *ReplaceRootVolumeTask { s.StartTime = &v @@ -150096,8 +152884,8 @@ type S3Storage struct { _ struct{} `type:"structure"` // The access key ID of the owner of the bucket. Before you specify a value - // for your access key ID, review and follow the guidance in Best Practices - // for Managing Amazon Web Services Access Keys (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + // for your access key ID, review and follow the guidance in Best practices + // for managing Amazon Web Services access keys (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). AWSAccessKeyId *string `type:"string"` // The bucket in which to store the AMI. You can specify a bucket that you already @@ -154101,29 +156889,46 @@ type SpotFleetRequestConfigData struct { // The strategy that determines how to allocate the target Spot Instance capacity // across the Spot Instance pools specified by the Spot Fleet launch configuration. // For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-allocation-strategy.html) - // in the Amazon EC2 User Guide for Linux Instances. + // in the Amazon EC2 User Guide. // - // lowestPrice - Spot Fleet launches instances from the lowest-price Spot Instance - // pool that has available capacity. If the cheapest pool doesn't have available - // capacity, the Spot Instances come from the next cheapest pool that has available - // capacity. If a pool runs out of capacity before fulfilling your desired capacity, - // Spot Fleet will continue to fulfill your request by drawing from the next - // cheapest pool. To ensure that your desired capacity is met, you might receive - // Spot Instances from several pools. - // - // diversified - Spot Fleet launches instances from all of the Spot Instance - // pools that you specify. - // - // capacityOptimized (recommended) - Spot Fleet launches instances from Spot - // Instance pools with optimal capacity for the number of instances that are - // launching. To give certain instance types a higher chance of launching first, - // use capacityOptimizedPrioritized. Set a priority for each instance type by - // using the Priority parameter for LaunchTemplateOverrides. You can assign - // the same priority to different LaunchTemplateOverrides. 
EC2 implements the - // priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized - // is supported only if your Spot Fleet uses a launch template. Note that if - // the OnDemandAllocationStrategy is set to prioritized, the same priority is - // applied when fulfilling On-Demand capacity. + // priceCapacityOptimized (recommended) + // + // Spot Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. Spot Fleet then requests Spot Instances from the lowest + // priced of these pools. + // + // capacityOptimized + // + // Spot Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. To give certain instance types a higher chance of launching + // first, use capacityOptimizedPrioritized. Set a priority for each instance + // type by using the Priority parameter for LaunchTemplateOverrides. You can + // assign the same priority to different LaunchTemplateOverrides. EC2 implements + // the priorities on a best-effort basis, but optimizes for capacity first. + // capacityOptimizedPrioritized is supported only if your Spot Fleet uses a + // launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, + // the same priority is applied when fulfilling On-Demand capacity. + // + // diversified + // + // Spot Fleet requests instances from all of the Spot Instance pools that you + // specify. + // + // lowestPrice + // + // Spot Fleet requests instances from the lowest priced Spot Instance pool that + // has available capacity. If the lowest priced pool doesn't have available + // capacity, the Spot Instances come from the next lowest priced pool that has + // available capacity. If a pool runs out of capacity before fulfilling your + // desired capacity, Spot Fleet will continue to fulfill your request by drawing + // from the next lowest priced pool. To ensure that your desired capacity is + // met, you might receive Spot Instances from several pools. Because this strategy + // only considers instance price and not capacity availability, it might lead + // to high interruption rates. // // Default: lowestPrice AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` @@ -154149,9 +156954,9 @@ type SpotFleetRequestConfigData struct { // role that grants the Spot Fleet the permission to request, launch, terminate, // and tag instances on your behalf. For more information, see Spot Fleet prerequisites // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) - // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate - // Spot Instances on your behalf when you cancel its Spot Fleet request using - // CancelSpotFleetRequests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests) + // in the Amazon EC2 User Guide. 
Spot Fleet can terminate Spot Instances on + // your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests) // or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration. // // IamFleetRole is a required field @@ -154987,27 +157792,44 @@ type SpotOptions struct { // For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html) // in the Amazon EC2 User Guide. // - // lowest-price - EC2 Fleet launches instances from the lowest-price Spot Instance - // pool that has available capacity. If the cheapest pool doesn't have available - // capacity, the Spot Instances come from the next cheapest pool that has available - // capacity. If a pool runs out of capacity before fulfilling your desired capacity, - // EC2 Fleet will continue to fulfill your request by drawing from the next - // cheapest pool. To ensure that your desired capacity is met, you might receive - // Spot Instances from several pools. - // - // diversified - EC2 Fleet launches instances from all of the Spot Instance - // pools that you specify. - // - // capacity-optimized (recommended) - EC2 Fleet launches instances from Spot - // Instance pools with optimal capacity for the number of instances that are - // launching. To give certain instance types a higher chance of launching first, - // use capacity-optimized-prioritized. Set a priority for each instance type - // by using the Priority parameter for LaunchTemplateOverrides. You can assign - // the same priority to different LaunchTemplateOverrides. EC2 implements the - // priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized - // is supported only if your fleet uses a launch template. Note that if the - // On-Demand AllocationStrategy is set to prioritized, the same priority is - // applied when fulfilling On-Demand capacity. + // price-capacity-optimized (recommended) + // + // EC2 Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. EC2 Fleet then requests Spot Instances from the lowest + // priced of these pools. + // + // capacity-optimized + // + // EC2 Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. To give certain instance types a higher chance of launching + // first, use capacity-optimized-prioritized. Set a priority for each instance + // type by using the Priority parameter for LaunchTemplateOverrides. You can + // assign the same priority to different LaunchTemplateOverrides. EC2 implements + // the priorities on a best-effort basis, but optimizes for capacity first. + // capacity-optimized-prioritized is supported only if your EC2 Fleet uses a + // launch template. Note that if the On-Demand AllocationStrategy is set to + // prioritized, the same priority is applied when fulfilling On-Demand capacity. + // + // diversified + // + // EC2 Fleet requests instances from all of the Spot Instance pools that you + // specify. 
+ // + // lowest-price + // + // EC2 Fleet requests instances from the lowest priced Spot Instance pool that + // has available capacity. If the lowest priced pool doesn't have available + // capacity, the Spot Instances come from the next lowest priced pool that has + // available capacity. If a pool runs out of capacity before fulfilling your + // desired capacity, EC2 Fleet will continue to fulfill your request by drawing + // from the next lowest priced pool. To ensure that your desired capacity is + // met, you might receive Spot Instances from several pools. Because this strategy + // only considers instance price and not capacity availability, it might lead + // to high interruption rates. // // Default: lowest-price AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` @@ -155142,27 +157964,44 @@ type SpotOptionsRequest struct { // For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html) // in the Amazon EC2 User Guide. // - // lowest-price - EC2 Fleet launches instances from the lowest-price Spot Instance - // pool that has available capacity. If the cheapest pool doesn't have available - // capacity, the Spot Instances come from the next cheapest pool that has available - // capacity. If a pool runs out of capacity before fulfilling your desired capacity, - // EC2 Fleet will continue to fulfill your request by drawing from the next - // cheapest pool. To ensure that your desired capacity is met, you might receive - // Spot Instances from several pools. - // - // diversified - EC2 Fleet launches instances from all of the Spot Instance - // pools that you specify. - // - // capacity-optimized (recommended) - EC2 Fleet launches instances from Spot - // Instance pools with optimal capacity for the number of instances that are - // launching. To give certain instance types a higher chance of launching first, - // use capacity-optimized-prioritized. Set a priority for each instance type - // by using the Priority parameter for LaunchTemplateOverrides. You can assign - // the same priority to different LaunchTemplateOverrides. EC2 implements the - // priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized - // is supported only if your fleet uses a launch template. Note that if the - // On-Demand AllocationStrategy is set to prioritized, the same priority is - // applied when fulfilling On-Demand capacity. + // price-capacity-optimized (recommended) + // + // EC2 Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. EC2 Fleet then requests Spot Instances from the lowest + // priced of these pools. + // + // capacity-optimized + // + // EC2 Fleet identifies the pools with the highest capacity availability for + // the number of instances that are launching. This means that we will request + // Spot Instances from the pools that we believe have the lowest chance of interruption + // in the near term. To give certain instance types a higher chance of launching + // first, use capacity-optimized-prioritized. Set a priority for each instance + // type by using the Priority parameter for LaunchTemplateOverrides. You can + // assign the same priority to different LaunchTemplateOverrides. 
EC2 implements + // the priorities on a best-effort basis, but optimizes for capacity first. + // capacity-optimized-prioritized is supported only if your EC2 Fleet uses a + // launch template. Note that if the On-Demand AllocationStrategy is set to + // prioritized, the same priority is applied when fulfilling On-Demand capacity. + // + // diversified + // + // EC2 Fleet requests instances from all of the Spot Instance pools that you + // specify. + // + // lowest-price + // + // EC2 Fleet requests instances from the lowest priced Spot Instance pool that + // has available capacity. If the lowest priced pool doesn't have available + // capacity, the Spot Instances come from the next lowest priced pool that has + // available capacity. If a pool runs out of capacity before fulfilling your + // desired capacity, EC2 Fleet will continue to fulfill your request by drawing + // from the next lowest priced pool. To ensure that your desired capacity is + // met, you might receive Spot Instances from several pools. Because this strategy + // only considers instance price and not capacity availability, it might lead + // to high interruption rates. // // Default: lowest-price AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` @@ -155845,6 +158684,8 @@ func (s *StartNetworkInsightsAccessScopeAnalysisOutput) SetNetworkInsightsAccess type StartNetworkInsightsAnalysisInput struct { _ struct{} `type:"structure"` + AdditionalAccounts []*string `locationName:"AdditionalAccount" locationNameList:"item" type:"list"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` @@ -155898,6 +158739,12 @@ func (s *StartNetworkInsightsAnalysisInput) Validate() error { return nil } +// SetAdditionalAccounts sets the AdditionalAccounts field's value. +func (s *StartNetworkInsightsAnalysisInput) SetAdditionalAccounts(v []*string) *StartNetworkInsightsAnalysisInput { + s.AdditionalAccounts = v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *StartNetworkInsightsAnalysisInput) SetClientToken(v string) *StartNetworkInsightsAnalysisInput { s.ClientToken = &v @@ -156843,6 +159690,76 @@ func (s *SubnetIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *SubnetCidrBloc return s } +// Describes an Infrastructure Performance subscription. +type Subscription struct { + _ struct{} `type:"structure"` + + // The Region or Availability Zone that's the target for the subscription. For + // example, eu-west-1. + Destination *string `locationName:"destination" type:"string"` + + // The metric used for the subscription. + Metric *string `locationName:"metric" type:"string" enum:"MetricType"` + + // The data aggregation time for the subscription. + Period *string `locationName:"period" type:"string" enum:"PeriodType"` + + // The Region or Availability Zone that's the source for the subscription. For + // example, us-east-1. + Source *string `locationName:"source" type:"string"` + + // The statistic used for the subscription. + Statistic *string `locationName:"statistic" type:"string" enum:"StatisticType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Subscription) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *Subscription) SetDestination(v string) *Subscription { + s.Destination = &v + return s +} + +// SetMetric sets the Metric field's value. +func (s *Subscription) SetMetric(v string) *Subscription { + s.Metric = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *Subscription) SetPeriod(v string) *Subscription { + s.Period = &v + return s +} + +// SetSource sets the Source field's value. +func (s *Subscription) SetSource(v string) *Subscription { + s.Source = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *Subscription) SetStatistic(v string) *Subscription { + s.Statistic = &v + return s +} + // Describes the burstable performance instance whose credit option for CPU // usage was successfully modified. type SuccessfulInstanceCreditSpecificationItem struct { @@ -165870,6 +168787,26 @@ func AddressFamily_Values() []string { } } +const ( + // AddressTransferStatusPending is a AddressTransferStatus enum value + AddressTransferStatusPending = "pending" + + // AddressTransferStatusDisabled is a AddressTransferStatus enum value + AddressTransferStatusDisabled = "disabled" + + // AddressTransferStatusAccepted is a AddressTransferStatus enum value + AddressTransferStatusAccepted = "accepted" +) + +// AddressTransferStatus_Values returns all elements of the AddressTransferStatus enum +func AddressTransferStatus_Values() []string { + return []string{ + AddressTransferStatusPending, + AddressTransferStatusDisabled, + AddressTransferStatusAccepted, + } +} + const ( // AffinityDefault is a Affinity enum value AffinityDefault = "default" @@ -165930,6 +168867,9 @@ const ( // AllocationStrategyCapacityOptimizedPrioritized is a AllocationStrategy enum value AllocationStrategyCapacityOptimizedPrioritized = "capacityOptimizedPrioritized" + + // AllocationStrategyPriceCapacityOptimized is a AllocationStrategy enum value + AllocationStrategyPriceCapacityOptimized = "priceCapacityOptimized" ) // AllocationStrategy_Values returns all elements of the AllocationStrategy enum @@ -165939,6 +168879,7 @@ func AllocationStrategy_Values() []string { AllocationStrategyDiversified, AllocationStrategyCapacityOptimized, AllocationStrategyCapacityOptimizedPrioritized, + AllocationStrategyPriceCapacityOptimized, } } @@ -166018,6 +168959,9 @@ const ( // ArchitectureTypeX8664Mac is a ArchitectureType enum value ArchitectureTypeX8664Mac = "x86_64_mac" + + // ArchitectureTypeArm64Mac is a ArchitectureType enum value + ArchitectureTypeArm64Mac = "arm64_mac" ) // ArchitectureType_Values returns all elements of the ArchitectureType enum @@ -166027,6 +168971,7 @@ func ArchitectureType_Values() []string { ArchitectureTypeX8664, ArchitectureTypeArm64, ArchitectureTypeX8664Mac, + ArchitectureTypeArm64Mac, } } @@ -166042,6 +168987,9 @@ const ( // ArchitectureValuesX8664Mac is a ArchitectureValues enum value ArchitectureValuesX8664Mac = "x86_64_mac" + + // ArchitectureValuesArm64Mac is a ArchitectureValues enum value + ArchitectureValuesArm64Mac = 
"arm64_mac" ) // ArchitectureValues_Values returns all elements of the ArchitectureValues enum @@ -166051,6 +168999,7 @@ func ArchitectureValues_Values() []string { ArchitectureValuesX8664, ArchitectureValuesArm64, ArchitectureValuesX8664Mac, + ArchitectureValuesArm64Mac, } } @@ -170121,6 +173070,18 @@ const ( // InstanceTypeU3tb156xlarge is a InstanceType enum value InstanceTypeU3tb156xlarge = "u-3tb1.56xlarge" + + // InstanceTypeU18tb1112xlarge is a InstanceType enum value + InstanceTypeU18tb1112xlarge = "u-18tb1.112xlarge" + + // InstanceTypeU24tb1112xlarge is a InstanceType enum value + InstanceTypeU24tb1112xlarge = "u-24tb1.112xlarge" + + // InstanceTypeTrn12xlarge is a InstanceType enum value + InstanceTypeTrn12xlarge = "trn1.2xlarge" + + // InstanceTypeTrn132xlarge is a InstanceType enum value + InstanceTypeTrn132xlarge = "trn1.32xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -170695,6 +173656,10 @@ func InstanceType_Values() []string { InstanceTypeR6aMetal, InstanceTypeP4de24xlarge, InstanceTypeU3tb156xlarge, + InstanceTypeU18tb1112xlarge, + InstanceTypeU24tb1112xlarge, + InstanceTypeTrn12xlarge, + InstanceTypeTrn132xlarge, } } @@ -171546,6 +174511,18 @@ func MembershipType_Values() []string { } } +const ( + // MetricTypeAggregateLatency is a MetricType enum value + MetricTypeAggregateLatency = "aggregate-latency" +) + +// MetricType_Values returns all elements of the MetricType enum +func MetricType_Values() []string { + return []string{ + MetricTypeAggregateLatency, + } +} + const ( // ModifyAvailabilityZoneOptInStatusOptedIn is a ModifyAvailabilityZoneOptInStatus enum value ModifyAvailabilityZoneOptInStatusOptedIn = "opted-in" @@ -171954,6 +174931,38 @@ func PaymentOption_Values() []string { } } +const ( + // PeriodTypeFiveMinutes is a PeriodType enum value + PeriodTypeFiveMinutes = "five-minutes" + + // PeriodTypeFifteenMinutes is a PeriodType enum value + PeriodTypeFifteenMinutes = "fifteen-minutes" + + // PeriodTypeOneHour is a PeriodType enum value + PeriodTypeOneHour = "one-hour" + + // PeriodTypeThreeHours is a PeriodType enum value + PeriodTypeThreeHours = "three-hours" + + // PeriodTypeOneDay is a PeriodType enum value + PeriodTypeOneDay = "one-day" + + // PeriodTypeOneWeek is a PeriodType enum value + PeriodTypeOneWeek = "one-week" +) + +// PeriodType_Values returns all elements of the PeriodType enum +func PeriodType_Values() []string { + return []string{ + PeriodTypeFiveMinutes, + PeriodTypeFifteenMinutes, + PeriodTypeOneHour, + PeriodTypeThreeHours, + PeriodTypeOneDay, + PeriodTypeOneWeek, + } +} + const ( // PermissionGroupAll is a PermissionGroup enum value PermissionGroupAll = "all" @@ -172978,6 +175987,9 @@ const ( // SpotAllocationStrategyCapacityOptimizedPrioritized is a SpotAllocationStrategy enum value SpotAllocationStrategyCapacityOptimizedPrioritized = "capacity-optimized-prioritized" + + // SpotAllocationStrategyPriceCapacityOptimized is a SpotAllocationStrategy enum value + SpotAllocationStrategyPriceCapacityOptimized = "price-capacity-optimized" ) // SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum @@ -172987,6 +175999,7 @@ func SpotAllocationStrategy_Values() []string { SpotAllocationStrategyDiversified, SpotAllocationStrategyCapacityOptimized, SpotAllocationStrategyCapacityOptimizedPrioritized, + SpotAllocationStrategyPriceCapacityOptimized, } } @@ -173126,6 +176139,18 @@ func StaticSourcesSupportValue_Values() []string { } } +const ( + // StatisticTypeP50 is a StatisticType enum 
value + StatisticTypeP50 = "p50" +) + +// StatisticType_Values returns all elements of the StatisticType enum +func StatisticType_Values() []string { + return []string{ + StatisticTypeP50, + } +} + const ( // StatusMoveInProgress is a Status enum value StatusMoveInProgress = "MoveInProgress" diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go index 5f572a841f34..74b09dec696c 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -2655,9 +2655,6 @@ func (c *ELBV2) ModifyTargetGroupRequest(input *ModifyTargetGroupInput) (req *re // Modifies the health checks used when evaluating the health state of the targets // in the specified target group. // -// If the protocol of the target group is TCP, TLS, UDP, or TCP_UDP, you can't -// modify the health check protocol, interval, timeout, or success codes. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4805,11 +4802,10 @@ type CreateTargetGroupInput struct { HealthCheckEnabled *bool `type:"boolean"` // The approximate amount of time, in seconds, between health checks of an individual - // target. If the target group protocol is HTTP or HTTPS, the default is 30 - // seconds. If the target group protocol is TCP, TLS, UDP, or TCP_UDP, the supported - // values are 10 and 30 seconds and the default is 30 seconds. If the target - // group protocol is GENEVE, the default is 10 seconds. If the target type is - // lambda, the default is 35 seconds. + // target. The range is 5-300. If the target group protocol is TCP, TLS, UDP, + // TCP_UDP, HTTP or HTTPS, the default is 30 seconds. If the target group protocol + // is GENEVE, the default is 10 seconds. If the target type is lambda, the default + // is 35 seconds. HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` // [HTTP/HTTPS health checks] The destination for health checks on the targets. @@ -4834,17 +4830,18 @@ type CreateTargetGroupInput struct { HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` // The amount of time, in seconds, during which no response from a target means - // a failed health check. For target groups with a protocol of HTTP, HTTPS, - // or GENEVE, the default is 5 seconds. For target groups with a protocol of - // TCP or TLS, this value must be 6 seconds for HTTP health checks and 10 seconds - // for TCP and HTTPS health checks. If the target type is lambda, the default - // is 30 seconds. + // a failed health check. The range is 2–120 seconds. For target groups with + // a protocol of HTTP, the default is 6 seconds. For target groups with a protocol + // of TCP, TLS or HTTPS, the default is 10 seconds. For target groups with a + // protocol of GENEVE, the default is 5 seconds. If the target type is lambda, + // the default is 30 seconds. HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` - // The number of consecutive health checks successes required before considering - // an unhealthy target healthy. For target groups with a protocol of HTTP or - // HTTPS, the default is 5. For target groups with a protocol of TCP, TLS, or - // GENEVE, the default is 3. If the target type is lambda, the default is 5. 
+ // The number of consecutive health check successes required before considering + // a target healthy. The range is 2-10. If the target group protocol is TCP, + // TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 5. For target groups with + // a protocol of GENEVE, the default is 3. If the target type is lambda, the + // default is 5. HealthyThresholdCount *int64 `min:"2" type:"integer"` // The type of IP address used for this target group. The possible values are @@ -4853,7 +4850,10 @@ type CreateTargetGroupInput struct { IpAddressType *string `type:"string" enum:"TargetGroupIpAddressTypeEnum"` // [HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for - // a successful response from a target. + // a successful response from a target. For target groups with a protocol of + // TCP, TCP_UDP, UDP or TLS the range is 200-599. For target groups with a protocol + // of HTTP or HTTPS, the range is 200-499. For target groups with a protocol + // of GENEVE, the range is 200-399. Matcher *Matcher `type:"structure"` // The name of the target group. @@ -4905,10 +4905,10 @@ type CreateTargetGroupInput struct { TargetType *string `type:"string" enum:"TargetTypeEnum"` // The number of consecutive health check failures required before considering - // a target unhealthy. If the target group protocol is HTTP or HTTPS, the default - // is 2. If the target group protocol is TCP or TLS, this value must be the - // same as the healthy threshold count. If the target group protocol is GENEVE, - // the default is 3. If the target type is lambda, the default is 2. + // a target unhealthy. The range is 2-10. If the target group protocol is TCP, + // TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2. For target groups with + // a protocol of GENEVE, the default is 3. If the target type is lambda, the + // default is 5. UnhealthyThresholdCount *int64 `min:"2" type:"integer"` // The identifier of the virtual private cloud (VPC). If the target is a Lambda @@ -6659,8 +6659,8 @@ type ForwardActionConfig struct { // The target group stickiness for the rule. TargetGroupStickinessConfig *TargetGroupStickinessConfig `type:"structure"` - // One or more target groups. For Network Load Balancers, you can specify a - // single target group. + // The target groups. For Network Load Balancers, you can specify a single target + // group. TargetGroups []*TargetGroupTuple `type:"list"` } @@ -6698,9 +6698,9 @@ func (s *ForwardActionConfig) SetTargetGroups(v []*TargetGroupTuple) *ForwardAct type HostHeaderConditionConfig struct { _ struct{} `type:"structure"` - // One or more host names. The maximum size of each name is 128 characters. - // The comparison is case insensitive. The following wildcard characters are - // supported: * (matches 0 or more characters) and ? (matches exactly 1 character). + // The host names. The maximum size of each name is 128 characters. The comparison + // is case insensitive. The following wildcard characters are supported: * (matches + // 0 or more characters) and ? (matches exactly 1 character). // // If you specify multiple strings, the condition is satisfied if one of the // strings matches the host name. @@ -6746,10 +6746,10 @@ type HttpHeaderConditionConfig struct { // to specify a host header condition. HttpHeaderName *string `type:"string"` - // One or more strings to compare against the value of the HTTP header. The - // maximum size of each string is 128 characters. The comparison strings are - // case insensitive. 
The following wildcard characters are supported: * (matches - // 0 or more characters) and ? (matches exactly 1 character). + // The strings to compare against the value of the HTTP header. The maximum + // size of each string is 128 characters. The comparison strings are case insensitive. + // The following wildcard characters are supported: * (matches 0 or more characters) + // and ? (matches exactly 1 character). // // If the same header appears multiple times in the request, we search them // in order until a match is found. @@ -7226,11 +7226,16 @@ type LoadBalancerAttribute struct { // The name of the attribute. // - // The following attribute is supported by all load balancers: + // The following attributes are supported by all load balancers: // // * deletion_protection.enabled - Indicates whether deletion protection // is enabled. The value is true or false. The default is false. // + // * load_balancing.cross_zone.enabled - Indicates whether cross-zone load + // balancing is enabled. The possible values are true and false. The default + // for Network Load Balancers and Gateway Load Balancers is false. The default + // for Application Load Balancers is true, and cannot be changed. + // // The following attributes are supported by both Application Load Balancers // and Network Load Balancers: // @@ -7305,13 +7310,6 @@ type LoadBalancerAttribute struct { // balancer to route requests to targets if it is unable to forward the request // to Amazon Web Services WAF. The possible values are true and false. The // default is false. - // - // The following attribute is supported by Network Load Balancers and Gateway - // Load Balancers: - // - // * load_balancing.cross_zone.enabled - Indicates whether cross-zone load - // balancing is enabled. The possible values are true and false. The default - // is false. Key *string `type:"string"` // The value of the attribute. @@ -7405,10 +7403,14 @@ type Matcher struct { GrpcCode *string `type:"string"` // For Application Load Balancers, you can specify values between 200 and 499, - // and the default value is 200. You can specify multiple values (for example, + // with the default value being 200. You can specify multiple values (for example, + // "200,202") or a range of values (for example, "200-299"). + // + // For Network Load Balancers, you can specify values between 200 and 599, with + // the default value being 200-399. You can specify multiple values (for example, // "200,202") or a range of values (for example, "200-299"). // - // For Network Load Balancers and Gateway Load Balancers, this must be "200–399". + // For Gateway Load Balancers, this must be "200–399". // // Note that when using shorthand syntax, some values such as commas need to // be escaped. @@ -7909,7 +7911,7 @@ type ModifyTargetGroupInput struct { HealthCheckEnabled *bool `type:"boolean"` // The approximate amount of time, in seconds, between health checks of an individual - // target. For TCP health checks, the supported values are 10 or 30 seconds. + // target. HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` // [HTTP/HTTPS health checks] The destination for health checks on the targets. @@ -7941,7 +7943,10 @@ type ModifyTargetGroupInput struct { HealthyThresholdCount *int64 `min:"2" type:"integer"` // [HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for - // a successful response from a target. + // a successful response from a target. For target groups with a protocol of + // TCP, TCP_UDP, UDP or TLS the range is 200-599. 
For target groups with a protocol + // of HTTP or HTTPS, the range is 200-499. For target groups with a protocol + // of GENEVE, the range is 200-399. Matcher *Matcher `type:"structure"` // The Amazon Resource Name (ARN) of the target group. @@ -7950,8 +7955,7 @@ type ModifyTargetGroupInput struct { TargetGroupArn *string `type:"string" required:"true"` // The number of consecutive health check failures required before considering - // the target unhealthy. For target groups with a protocol of TCP or TLS, this - // value must be the same as the healthy threshold count. + // the target unhealthy. UnhealthyThresholdCount *int64 `min:"2" type:"integer"` } @@ -8096,10 +8100,10 @@ func (s *ModifyTargetGroupOutput) SetTargetGroups(v []*TargetGroup) *ModifyTarge type PathPatternConditionConfig struct { _ struct{} `type:"structure"` - // One or more path patterns to compare against the request URL. The maximum - // size of each string is 128 characters. The comparison is case sensitive. - // The following wildcard characters are supported: * (matches 0 or more characters) - // and ? (matches exactly 1 character). + // The path patterns to compare against the request URL. The maximum size of + // each string is 128 characters. The comparison is case sensitive. The following + // wildcard characters are supported: * (matches 0 or more characters) and ? + // (matches exactly 1 character). // // If you specify multiple strings, the condition is satisfied if one of them // matches the request URL. The path pattern is compared only to the path of @@ -8141,9 +8145,9 @@ func (s *PathPatternConditionConfig) SetValues(v []*string) *PathPatternConditio type QueryStringConditionConfig struct { _ struct{} `type:"structure"` - // One or more key/value pairs or values to find in the query string. The maximum - // size of each string is 128 characters. The comparison is case insensitive. - // The following wildcard characters are supported: * (matches 0 or more characters) + // The key/value pairs or values to find in the query string. The maximum size + // of each string is 128 characters. The comparison is case insensitive. The + // following wildcard characters are supported: * (matches 0 or more characters) // and ? (matches exactly 1 character). To search for a literal '*' or '?' character // in a query string, you must escape these characters in Values using a '\' // character. @@ -9292,8 +9296,8 @@ func (s *SetSubnetsOutput) SetIpAddressType(v string) *SetSubnetsOutput { type SourceIpConditionConfig struct { _ struct{} `type:"structure"` - // One or more source IP addresses, in CIDR format. You can use both IPv4 and - // IPv6 addresses. Wildcards are not supported. + // The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. + // Wildcards are not supported. // // If you specify multiple addresses, the condition is satisfied if the source // IP address of the request matches one of the CIDR blocks. This condition @@ -9553,6 +9557,10 @@ type TargetDescription struct { // traffic from the load balancer nodes in the specified Availability Zone or // from all enabled Availability Zones for the load balancer. // + // For Application Load Balancer target groups, the specified Availability Zone + // value is only applicable when cross-zone load balancing is off. Otherwise + // the parameter is ignored and treated as all. + // // This parameter is not supported if the target type of the target group is // instance or alb. 
// @@ -9561,8 +9569,10 @@ type TargetDescription struct { // parameter is optional. If the IP address is outside the VPC, this parameter // is required. // - // With an Application Load Balancer, if the target type is ip and the IP address - // is outside the VPC for the target group, the only supported value is all. + // For Application Load Balancer target groups with cross-zone load balancing + // off, if the target type is ip and the IP address is outside of the VPC for + // the target group, this should be an Availability Zone inside the VPC for + // the target group. // // If the target type is lambda, this parameter is optional and the only supported // value is all. @@ -9842,7 +9852,7 @@ type TargetGroupAttribute struct { // The name of the attribute. // - // The following attribute is supported by all load balancers: + // The following attributes are supported by all load balancers: // // * deregistration_delay.timeout_seconds - The amount of time, in seconds, // for Elastic Load Balancing to wait before changing the state of a deregistering @@ -9850,9 +9860,6 @@ type TargetGroupAttribute struct { // value is 300 seconds. If the target is a Lambda function, this attribute // is not supported. // - // The following attributes are supported by Application Load Balancers, Network - // Load Balancers, and Gateway Load Balancers: - // // * stickiness.enabled - Indicates whether target stickiness is enabled. // The value is true or false. The default is false. // @@ -9861,6 +9868,37 @@ type TargetGroupAttribute struct { // for Network Load Balancers. source_ip_dest_ip and source_ip_dest_ip_proto // for Gateway Load Balancers. // + // The following attributes are supported by Application Load Balancers and + // Network Load Balancers: + // + // * load_balancing.cross_zone.enabled - Indicates whether cross zone load + // balancing is enabled. The value is true, false or use_load_balancer_configuration. + // The default is use_load_balancer_configuration. + // + // * target_group_health.dns_failover.minimum_healthy_targets.count - The + // minimum number of targets that must be healthy. If the number of healthy + // targets is below this value, mark the zone as unhealthy in DNS, so that + // traffic is routed only to healthy zones. The possible values are off or + // an integer from 1 to the maximum number of targets. The default is off. + // + // * target_group_health.dns_failover.minimum_healthy_targets.percentage + // - The minimum percentage of targets that must be healthy. If the percentage + // of healthy targets is below this value, mark the zone as unhealthy in + // DNS, so that traffic is routed only to healthy zones. The possible values + // are off or an integer from 1 to 100. The default is off. + // + // * target_group_health.unhealthy_state_routing.minimum_healthy_targets.count + // - The minimum number of targets that must be healthy. If the number of + // healthy targets is below this value, send traffic to all targets, including + // unhealthy targets. The possible values are 1 to the maximum number of + // targets. The default is 1. + // + // * target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage + // - The minimum percentage of targets that must be healthy. If the percentage + // of healthy targets is below this value, send traffic to all targets, including + // unhealthy targets. The possible values are off or an integer from 1 to + // 100. The default is off. 
+ // // The following attributes are supported only if the load balancer is an Application // Load Balancer and the target is an instance or an IP address: // diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 2b7e675ab864..c0706bec93dd 100644 --- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -74,16 +74,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // When you create a role, you create two policies: A role trust policy that @@ -307,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. 
You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services @@ -343,11 +343,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -563,16 +564,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. 
You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // # Tags @@ -588,11 +589,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -1110,9 +1112,9 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. 
// // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass @@ -1424,11 +1426,12 @@ type AssumeRoleInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1441,11 +1444,12 @@ type AssumeRoleInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1520,11 +1524,12 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1843,11 +1848,12 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1860,11 +1866,12 @@ type AssumeRoleWithSAMLInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2190,11 +2197,12 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. 
Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2207,11 +2215,12 @@ type AssumeRoleWithWebIdentityInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2934,8 +2943,8 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2960,11 +2969,12 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. 
Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2973,11 +2983,12 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -2997,11 +3008,12 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -3015,11 +3027,12 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
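The doc comments above describe how STS compresses the inline session policy, the managed policy ARNs, and the session tags into one packed binary blob with its own size limit, and reports how full that blob is via the PackedPolicySize response element. A minimal sketch of how a caller might pass all three and read that field back, using the upstream aws-sdk-go import paths rather than the vendored ones; the role ARN, policy ARN, and tag values below are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("packed-policy-demo"),
		// Inline session policy: counts toward the packed size limit.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}`),
		// Managed session policy ARNs: packed into the same blob.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // placeholder
		},
		// Session tags: compressed together with the policies.
		Tags: []*sts.Tag{
			{Key: aws.String("team"), Value: aws.String("autoscaling")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Percentage of the packed-size limit consumed by the policies and tags above.
	fmt.Printf("PackedPolicySize: %d%%\n", aws.Int64Value(out.PackedPolicySize))
}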
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. When you do, session tags override a user diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe06fd8..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381aee5419..013fc1943a9e 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b90cb..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: 
OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index 8956c30884d9..fb376fce2da1 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -7,8 +7,8 @@ import ( ) const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" ) // Regular expressions used by the normalizations @@ -18,13 +18,14 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`) // NormalizeURL will normalize the specified URL // This was added to replace a previous call to the no longer maintained purell library: // The call that was used looked like the following: -// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) // // To explain all that was included in the call above, purell.FlagsSafe was really just the following: -// - FlagLowercaseScheme -// - FlagLowercaseHost -// - FlagRemoveDefaultPort -// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) func NormalizeURL(u *url.URL) { lowercaseScheme(u) lowercaseHost(u) @@ -48,7 +49,7 @@ func removeDefaultPort(u *url.URL) { if len(u.Host) > 0 { scheme := strings.ToLower(u.Scheme) u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { return "" } return val diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitattributes b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 000000000000..49ad52766abb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. 
+*.go text eol=lf diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml index 813c47aa6436..bf503e400016 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml @@ -37,3 +37,18 @@ linters: - gci - gocognit - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a887286c..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c5014e2..55094cb74c4d 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/file.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 000000000000..16accc55f82e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go index 9a60409725e4..00038c3773c9 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: 
noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d31337..f5228b82c0f8 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8bc784..7c7da9c08804 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f339357b..2757d9b95f82 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d1e43..0565db377bef 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go index 193702f2cec0..f78ab684a0ae 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go index ec96914405b7..f09ee609f3b4 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,126 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node 
*yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +215,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +390,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go deleted file mode 100644 index 226b78dd46ec..000000000000 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
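The go-openapi/swag change above moves BytesToYAMLDoc and its helpers from yaml.v2's MapSlice to yaml.v3's yaml.Node so that documents can be walked explicitly while preserving key order. A rough standalone sketch of that pattern, assuming only gopkg.in/yaml.v3; the input document and its keys are made up for illustration:

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v3"
)

func main() {
	data := []byte("name: recommender\nreplicas: 2\n")

	// Unmarshalling into a yaml.Node keeps the document structure and key order.
	var doc yaml.Node
	if err := yaml.Unmarshal(data, &doc); err != nil {
		log.Fatal(err)
	}
	// A well-formed object document is a DocumentNode wrapping a single MappingNode.
	if doc.Kind != yaml.DocumentNode || len(doc.Content) != 1 || doc.Content[0].Kind != yaml.MappingNode {
		log.Fatal("only YAML documents that are objects are supported")
	}

	// Mapping content alternates key and value nodes, in source order.
	m := doc.Content[0]
	for i := 0; i < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		fmt.Printf("%s (%s) = %s\n", k.Value, v.LongTag(), v.Value)
	}
}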
- -package accelerators - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/cadvisor/container" - info "github.com/google/cadvisor/info/v1" - "github.com/google/cadvisor/stats" - - "github.com/mindprince/gonvml" - "k8s.io/klog/v2" -) - -type nvidiaManager struct { - sync.Mutex - - // true if there are NVIDIA devices present on the node - devicesPresent bool - - // true if the NVML library (libnvidia-ml.so.1) was loaded successfully - nvmlInitialized bool - - // nvidiaDevices is a map from device minor number to a handle that can be used to get metrics about the device - nvidiaDevices map[int]gonvml.Device -} - -var sysFsPCIDevicesPath = "/sys/bus/pci/devices/" - -const nvidiaVendorID = "0x10de" - -func NewNvidiaManager(includedMetrics container.MetricSet) stats.Manager { - if !includedMetrics.Has(container.AcceleratorUsageMetrics) { - klog.V(2).Info("NVIDIA GPU metrics disabled") - return &stats.NoopManager{} - } - - manager := &nvidiaManager{} - err := manager.setup() - if err != nil { - klog.V(2).Infof("NVIDIA setup failed: %s", err) - } - return manager -} - -// setup initializes NVML if NVIDIA devices are present on the node. -func (nm *nvidiaManager) setup() error { - if !detectDevices(nvidiaVendorID) { - return fmt.Errorf("no NVIDIA devices found") - } - - nm.devicesPresent = true - - return initializeNVML(nm) -} - -// detectDevices returns true if a device with given pci id is present on the node. -func detectDevices(vendorID string) bool { - devices, err := ioutil.ReadDir(sysFsPCIDevicesPath) - if err != nil { - klog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) - return false - } - - for _, device := range devices { - vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor") - content, err := ioutil.ReadFile(vendorPath) - if err != nil { - klog.V(4).Infof("Error while reading %q: %v", vendorPath, err) - continue - } - if strings.EqualFold(strings.TrimSpace(string(content)), vendorID) { - klog.V(3).Infof("Found device with vendorID %q", vendorID) - return true - } - } - return false -} - -// initializeNVML initializes the NVML library and sets up the nvmlDevices map. -// This is defined as a variable to help in testing. -var initializeNVML = func(nm *nvidiaManager) error { - if err := gonvml.Initialize(); err != nil { - // This is under a logging level because otherwise we may cause - // log spam if the drivers/nvml is not installed on the system. - return fmt.Errorf("Could not initialize NVML: %v", err) - } - nm.nvmlInitialized = true - numDevices, err := gonvml.DeviceCount() - if err != nil { - return fmt.Errorf("GPU metrics would not be available. Failed to get the number of NVIDIA devices: %v", err) - } - if numDevices == 0 { - return nil - } - klog.V(1).Infof("NVML initialized. Number of NVIDIA devices: %v", numDevices) - nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices) - for i := 0; i < int(numDevices); i++ { - device, err := gonvml.DeviceHandleByIndex(uint(i)) - if err != nil { - return fmt.Errorf("Failed to get NVIDIA device handle %d: %v", i, err) - } - minorNumber, err := device.MinorNumber() - if err != nil { - return fmt.Errorf("Failed to get NVIDIA device minor number: %v", err) - } - nm.nvidiaDevices[int(minorNumber)] = device - } - return nil -} - -// Destroy shuts down NVML. 
-func (nm *nvidiaManager) Destroy() { - if nm.nvmlInitialized { - err := gonvml.Shutdown() - if err != nil { - klog.Warningf("nvml library shutdown failed: %s", err) - } - } -} - -// GetCollector returns a collector that can fetch NVIDIA gpu metrics for NVIDIA devices -// present in the devices.list file in the given devicesCgroupPath. -func (nm *nvidiaManager) GetCollector(devicesCgroupPath string) (stats.Collector, error) { - nc := &nvidiaCollector{} - - if !nm.devicesPresent { - return &stats.NoopCollector{}, nil - } - // Makes sure that we don't call initializeNVML() concurrently and - // that we only call initializeNVML() when it's not initialized. - nm.Lock() - if !nm.nvmlInitialized { - err := initializeNVML(nm) - if err != nil { - nm.Unlock() - return &stats.NoopCollector{}, err - } - } - nm.Unlock() - if len(nm.nvidiaDevices) == 0 { - return &stats.NoopCollector{}, nil - } - nvidiaMinorNumbers, err := parseDevicesCgroup(devicesCgroupPath) - if err != nil { - return &stats.NoopCollector{}, err - } - - for _, minor := range nvidiaMinorNumbers { - device, ok := nm.nvidiaDevices[minor] - if !ok { - return &stats.NoopCollector{}, fmt.Errorf("NVIDIA device minor number %d not found in cached devices", minor) - } - nc.devices = append(nc.devices, device) - } - return nc, nil -} - -// parseDevicesCgroup parses the devices cgroup devices.list file for the container -// and returns a list of minor numbers corresponding to NVIDIA GPU devices that the -// container is allowed to access. In cases where the container has access to all -// devices or all NVIDIA devices but the devices are not enumerated separately in -// the devices.list file, we return an empty list. -// This is defined as a variable to help in testing. -var parseDevicesCgroup = func(devicesCgroupPath string) ([]int, error) { - // Always return a non-nil slice - nvidiaMinorNumbers := []int{} - - devicesList := filepath.Join(devicesCgroupPath, "devices.list") - f, err := os.Open(devicesList) - if err != nil { - return nvidiaMinorNumbers, fmt.Errorf("error while opening devices cgroup file %q: %v", devicesList, err) - } - defer f.Close() - - s := bufio.NewScanner(f) - - // See https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt for the file format - for s.Scan() { - text := s.Text() - - fields := strings.Fields(text) - if len(fields) != 3 { - return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: must contain three whitespace-separated fields", text) - } - - // Split the second field to find out major:minor numbers - majorMinor := strings.Split(fields[1], ":") - if len(majorMinor) != 2 { - return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: second field should have one colon", text) - } - - // NVIDIA graphics devices are character devices with major number 195. - // https://github.com/torvalds/linux/blob/v4.13/Documentation/admin-guide/devices.txt#L2583 - if fields[0] == "c" && majorMinor[0] == "195" { - minorNumber, err := strconv.Atoi(majorMinor[1]) - if err != nil { - return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: minor number is not integer", text) - } - // We don't want devices like nvidiactl (195:255) and nvidia-modeset (195:254) - if minorNumber < 128 { - nvidiaMinorNumbers = append(nvidiaMinorNumbers, minorNumber) - } - // We are ignoring the "195:*" case - // where the container has access to all NVIDIA devices on the machine. - } - // We are ignoring the "*:*" case - // where the container has access to all devices on the machine. 
- } - return nvidiaMinorNumbers, nil -} - -type nvidiaCollector struct { - // Exposed for testing - devices []gonvml.Device - - stats.NoopDestroy -} - -func NewNvidiaCollector(devices []gonvml.Device) stats.Collector { - return &nvidiaCollector{devices: devices} -} - -// UpdateStats updates the stats for NVIDIA GPUs (if any) attached to the container. -func (nc *nvidiaCollector) UpdateStats(stats *info.ContainerStats) error { - for _, device := range nc.devices { - model, err := device.Name() - if err != nil { - return fmt.Errorf("error while getting gpu name: %v", err) - } - uuid, err := device.UUID() - if err != nil { - return fmt.Errorf("error while getting gpu uuid: %v", err) - } - memoryTotal, memoryUsed, err := device.MemoryInfo() - if err != nil { - return fmt.Errorf("error while getting gpu memory info: %v", err) - } - //TODO: Use housekeepingInterval - utilizationGPU, err := device.AverageGPUUtilization(10 * time.Second) - if err != nil { - return fmt.Errorf("error while getting gpu utilization: %v", err) - } - - stats.Accelerators = append(stats.Accelerators, info.AcceleratorStats{ - Make: "nvidia", - Model: model, - ID: uuid, - MemoryTotal: memoryTotal, - MemoryUsed: memoryUsed, - DutyCycle: uint64(utilizationGPU), - }) - } - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go index e028f279cfcd..7248f3b0b849 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go @@ -172,7 +172,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach if cgroup2UnifiedMode { if utils.FileExists(path.Join(memoryRoot, "memory.max")) { spec.HasMemory = true - spec.Memory.Reservation = readUInt64(memoryRoot, "memory.high") + spec.Memory.Reservation = readUInt64(memoryRoot, "memory.min") spec.Memory.Limit = readUInt64(memoryRoot, "memory.max") spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.swap.max") } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go index 8f32f8c6af77..226a6fa6e5d6 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go @@ -57,7 +57,6 @@ const ( NetworkTcpUsageMetrics MetricKind = "tcp" NetworkAdvancedTcpUsageMetrics MetricKind = "advtcp" NetworkUdpUsageMetrics MetricKind = "udp" - AcceleratorUsageMetrics MetricKind = "accelerator" AppMetrics MetricKind = "app" ProcessMetrics MetricKind = "process" HugetlbUsageMetrics MetricKind = "hugetlb" @@ -78,7 +77,6 @@ var AllMetrics = MetricSet{ MemoryNumaMetrics: struct{}{}, CpuLoadMetrics: struct{}{}, DiskIOMetrics: struct{}{}, - AcceleratorUsageMetrics: struct{}{}, DiskUsageMetrics: struct{}{}, NetworkUsageMetrics: struct{}{}, NetworkTcpUsageMetrics: struct{}{}, diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/machine.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/machine.go index 95d8b9ac4c9b..47a43604053c 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/machine.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/machine.go @@ -197,6 +197,9 @@ 
type MachineInfo struct { // The amount of memory (in bytes) in this machine MemoryCapacity uint64 `json:"memory_capacity"` + // The amount of swap (in bytes) in this machine + SwapCapacity uint64 `json:"swap_capacity"` + // Memory capacity and number of DIMMs by memory type MemoryByType map[string]*MemoryInfo `json:"memory_by_type"` diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go index 0550a5fa2993..a7a3d3e0cdba 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go @@ -79,6 +79,11 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach return nil, err } + swapCapacity, err := GetMachineSwapCapacity() + if err != nil { + return nil, err + } + nvmInfo, err := nvm.GetInfo() if err != nil { return nil, err @@ -128,6 +133,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach CpuFrequency: clockSpeed, MemoryCapacity: memoryCapacity, MemoryByType: memoryByType, + SwapCapacity: swapCapacity, NVMInfo: nvmInfo, HugePages: hugePagesInfo, DiskMap: diskMap, diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go index a5f2040a6fa1..861e230cfc48 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go @@ -40,6 +40,7 @@ import ( "github.com/google/cadvisor/utils/cpuload" "github.com/docker/go-units" + "k8s.io/klog/v2" "k8s.io/utils/clock" ) @@ -96,9 +97,6 @@ type containerData struct { // Runs custom metric collectors. collectorManager collector.CollectorManager - // nvidiaCollector updates stats for Nvidia GPUs attached to the container. - nvidiaCollector stats.Collector - // perfCollector updates stats for perf_event cgroup controller. 
perfCollector stats.Collector @@ -448,7 +446,6 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h onDemandChan: make(chan chan struct{}, 100), clock: clock, perfCollector: &stats.NoopCollector{}, - nvidiaCollector: &stats.NoopCollector{}, resctrlCollector: &stats.NoopCollector{}, } cont.info.ContainerReference = ref @@ -688,12 +685,6 @@ func (cd *containerData) updateStats() error { } } - var nvidiaStatsErr error - if cd.nvidiaCollector != nil { - // This updates the Accelerators field of the stats struct - nvidiaStatsErr = cd.nvidiaCollector.UpdateStats(stats) - } - perfStatsErr := cd.perfCollector.UpdateStats(stats) resctrlStatsErr := cd.resctrlCollector.UpdateStats(stats) @@ -718,10 +709,6 @@ func (cd *containerData) updateStats() error { if statsErr != nil { return statsErr } - if nvidiaStatsErr != nil { - klog.Errorf("error occurred while collecting nvidia stats for container %s: %s", cInfo.Name, err) - return nvidiaStatsErr - } if perfStatsErr != nil { klog.Errorf("error occurred while collecting perf stats for container %s: %s", cInfo.Name, err) return perfStatsErr diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go index 980e0a3863a2..8c4396d65dc6 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go @@ -27,7 +27,6 @@ import ( "sync/atomic" "time" - "github.com/google/cadvisor/accelerators" "github.com/google/cadvisor/cache/memory" "github.com/google/cadvisor/collector" "github.com/google/cadvisor/container" @@ -199,7 +198,6 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig containerWatchers: []watcher.ContainerWatcher{}, eventsChannel: eventsChannel, collectorHTTPClient: collectorHTTPClient, - nvidiaManager: accelerators.NewNvidiaManager(includedMetricsSet), rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList, containerEnvMetadataWhiteList: containerEnvMetadataWhiteList, } @@ -259,7 +257,6 @@ type manager struct { containerWatchers []watcher.ContainerWatcher eventsChannel chan watcher.ContainerEvent collectorHTTPClient *http.Client - nvidiaManager stats.Manager perfManager stats.Manager resctrlManager resctrl.Manager // List of raw container cgroup path prefix whitelist. @@ -327,7 +324,6 @@ func (m *manager) Start() error { } func (m *manager) Stop() error { - defer m.nvidiaManager.Destroy() defer m.destroyCollectors() // Stop and wait on all quit channels. 
for i, c := range m.quitChannels { @@ -934,17 +930,6 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche return err } - if !cgroups.IsCgroup2UnifiedMode() { - devicesCgroupPath, err := handler.GetCgroupPath("devices") - if err != nil { - klog.Warningf("Error getting devices cgroup path: %v", err) - } else { - cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath) - if err != nil { - klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %s: %s", cont.info.Name, err) - } - } - } if m.includedMetrics.Has(container.PerfMetrics) { perfCgroupPath, err := handler.GetCgroupPath("perf_event") if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go index 04c8d27e8f7b..75fc2905fde4 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go @@ -25,6 +25,7 @@ import ( v2 "github.com/google/cadvisor/info/v2" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog/v2" "k8s.io/utils/clock" ) @@ -492,59 +493,6 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri }, }...) } - if includedMetrics.Has(container.AcceleratorUsageMetrics) { - c.containerMetrics = append(c.containerMetrics, []containerMetric{ - { - name: "container_accelerator_memory_total_bytes", - help: "Total accelerator memory.", - valueType: prometheus.GaugeValue, - extraLabels: []string{"make", "model", "acc_id"}, - getValues: func(s *info.ContainerStats) metricValues { - values := make(metricValues, 0, len(s.Accelerators)) - for _, value := range s.Accelerators { - values = append(values, metricValue{ - value: float64(value.MemoryTotal), - labels: []string{value.Make, value.Model, value.ID}, - timestamp: s.Timestamp, - }) - } - return values - }, - }, { - name: "container_accelerator_memory_used_bytes", - help: "Total accelerator memory allocated.", - valueType: prometheus.GaugeValue, - extraLabels: []string{"make", "model", "acc_id"}, - getValues: func(s *info.ContainerStats) metricValues { - values := make(metricValues, 0, len(s.Accelerators)) - for _, value := range s.Accelerators { - values = append(values, metricValue{ - value: float64(value.MemoryUsed), - labels: []string{value.Make, value.Model, value.ID}, - timestamp: s.Timestamp, - }) - } - return values - }, - }, { - name: "container_accelerator_duty_cycle", - help: "Percent of time over the past sample period during which the accelerator was actively processing.", - valueType: prometheus.GaugeValue, - extraLabels: []string{"make", "model", "acc_id"}, - getValues: func(s *info.ContainerStats) metricValues { - values := make(metricValues, 0, len(s.Accelerators)) - for _, value := range s.Accelerators { - values = append(values, metricValue{ - value: float64(value.DutyCycle), - labels: []string{value.Make, value.Model, value.ID}, - timestamp: s.Timestamp, - }) - } - return values - }, - }, - }...) 
- } if includedMetrics.Has(container.DiskUsageMetrics) { c.containerMetrics = append(c.containerMetrics, []containerMetric{ { diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go index 672c83ef7167..6219a4da588e 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go @@ -213,7 +213,10 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) { costTracker.Estimator = p.callCostEstimator costTracker.Limit = p.costLimit - decs := decorators + // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This + // prevents the underlying memory from being shared between factory function calls causing + // undesired mutations. + decs := decorators[:len(decorators):len(decorators)] var observers []interpreter.EvalObserver if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 { diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/hash.go b/cluster-autoscaler/vendor/github.com/google/uuid/hash.go index b17461631511..b404f4bec274 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/hash.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/null.go b/cluster-autoscaler/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000000..d7fcbf286516 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
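The cel-go program.go change above relies on Go's full slice expression, s[low:high:max], to cap a slice's capacity so that a later append must reallocate instead of writing into the shared backing array. A self-contained illustration of that behavior; the slice contents are arbitrary:

package main

import "fmt"

func main() {
	base := []string{"decorator-a", "decorator-b", "decorator-c"}

	// Plain reslice: shares the backing array, so append can overwrite it
	// when spare capacity exists.
	shared := base[:2]
	// Capacity-limited reslice: len == cap, so append must allocate a copy.
	isolated := base[:2:2]

	shared = append(shared, "observer-1")
	fmt.Println(base) // [decorator-a decorator-b observer-1]  (base[2] was clobbered)

	base[2] = "decorator-c" // restore before the second demonstration
	isolated = append(isolated, "observer-2")
	fmt.Println(base)     // [decorator-a decorator-b decorator-c]  (untouched)
	fmt.Println(isolated) // [decorator-a decorator-b observer-2]
}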
+func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/sql.go b/cluster-autoscaler/vendor/github.com/google/uuid/sql.go index f326b54db37a..2e02ec06c012 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/sql.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/uuid.go b/cluster-autoscaler/vendor/github.com/google/uuid/uuid.go index 524404cc5227..a57207aeb6fd 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/uuid.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/uuid.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "strings" + "sync" ) // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC @@ -33,7 +34,27 @@ const ( Future // Reserved for future definition. ) -var rander = rand.Reader // random function +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} // Parse decodes s into a UUID or returns an error. 
Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and @@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -243,3 +264,31 @@ func SetRand(r io.Reader) { } rander = r } + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/version4.go b/cluster-autoscaler/vendor/github.com/google/uuid/version4.go index c110465db590..7697802e4d16 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/version4.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/version4.go @@ -14,11 +14,21 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. // +// Uses the randomness pool if it was enabled with EnableRandPool. +// // A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being @@ -27,7 +37,10 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() } // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
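The rand-pool API added above trades a small amount of heap-resident randomness for fewer reads from crypto/rand when generating Version 4 UUIDs. A short usage sketch, assuming the published github.com/google/uuid module and following the comments above that the pool must be enabled before any concurrent generation:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Not thread-safe: call before any goroutine starts generating UUIDs.
	uuid.EnableRandPool()

	// Version 4 UUIDs now draw from the batched pool instead of hitting
	// crypto/rand on every call.
	for i := 0; i < 3; i++ {
		fmt.Println(uuid.NewString())
	}
}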
@@ -41,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) { uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/cluster-autoscaler/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/cluster-autoscaler/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65ad79..b5f5e2613293 100644 --- a/cluster-autoscaler/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/cluster-autoscaler/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh deleted file mode 100644 index 646934bb404b..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.gofmt.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [ -n "$(gofmt -s -l .)" ]; then - echo "Go code is not properly formatted:" - gofmt -s -d -e . - exit 1 -fi diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml deleted file mode 100644 index d45dc3c918c8..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - "1.8" - - "1.9" - - "1.10" - -script: - - make presubmit diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/LICENSE b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile deleted file mode 100644 index 65314b34b4a5..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -PKG=github.com/mindprince/gonvml - -.PHONY: build -build: - docker run -v $(shell pwd):/go/src/$(PKG) --workdir=/go/src/$(PKG) golang:1.8 go build cmd/example/example.go - -.PHONY: presubmit -presubmit: - ./.travis.gofmt.sh diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/NVML_NOTICE b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/NVML_NOTICE deleted file mode 100644 index 3fc131329d8a..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/NVML_NOTICE +++ /dev/null @@ -1,32 +0,0 @@ -Copyright 1993-2016 NVIDIA Corporation. All rights reserved. - -NOTICE TO USER: - -This source code is subject to NVIDIA ownership rights under U.S. and -international Copyright laws. Users and possessors of this source code -are hereby granted a nonexclusive, royalty-free license to use this code -in individual and commercial software. - -NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE -CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR -IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. -IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, -OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE -OR PERFORMANCE OF THIS SOURCE CODE. - -U.S. Government End Users. This source code is a "commercial item" as -that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of -"commercial computer software" and "commercial computer software -documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) -and is provided to the U.S. Government only as a commercial end item. -Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through -227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the -source code with only those rights set forth herein. - -Any use of this source code in individual and commercial software must -include, in the user documentation and internal comments to the code, -the above Disclaimer and U.S. Government End Users Notice. 
diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/README.md b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/README.md deleted file mode 100644 index 91bbf5740789..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/README.md +++ /dev/null @@ -1,19 +0,0 @@ -Go Bindings for NVML --------------------- - -[NVML or NVIDIA Management -Library](https://developer.nvidia.com/nvidia-management-library-nvml) is a -C-based API that can be used for monitoring NVIDIA GPU devices. It's closed -source but can be downloaded as part of the [GPU Deployment -Kit](https://developer.nvidia.com/gpu-deployment-kit). - -The [NVML API -Reference](http://docs.nvidia.com/deploy/nvml-api/nvml-api-reference.html) -describe various methods that are available as part of NVML. - -The `nvml.h` file is included in this repository so that we don't depend on -the presence of NVML in the build environment. - -The `bindings.go` file is the cgo bridge which calls the NVML functions. The -cgo preamble in `bindings.go` uses `dlopen` to dynamically load NVML and makes -its functions available. diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go deleted file mode 100644 index abefe83eabf3..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings.go +++ /dev/null @@ -1,524 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gonvml - -// #cgo LDFLAGS: -ldl -/* -#include -#include -#include - -#include "nvml.h" - -// nvmlHandle is the handle for dynamically loaded libnvidia-ml.so -void *nvmlHandle; - -nvmlReturn_t (*nvmlInitFunc)(void); - -nvmlReturn_t (*nvmlShutdownFunc)(void); - -const char* (*nvmlErrorStringFunc)(nvmlReturn_t result); -const char* nvmlErrorString(nvmlReturn_t result) { - if (nvmlErrorStringFunc == NULL) { - return "nvmlErrorString Function Not Found"; - } - return nvmlErrorStringFunc(result); -} - -nvmlReturn_t (*nvmlSystemGetDriverVersionFunc)(char *version, unsigned int length); -nvmlReturn_t nvmlSystemGetDriverVersion(char *version, unsigned int length) { - if (nvmlSystemGetDriverVersionFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlSystemGetDriverVersionFunc(version, length); -} - -nvmlReturn_t (*nvmlDeviceGetCountFunc)(unsigned int *deviceCount); -nvmlReturn_t nvmlDeviceGetCount(unsigned int *deviceCount) { - if (nvmlDeviceGetCountFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetCountFunc(deviceCount); -} - -nvmlReturn_t (*nvmlDeviceGetHandleByIndexFunc)(unsigned int index, nvmlDevice_t *device); -nvmlReturn_t nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device) { - if (nvmlDeviceGetHandleByIndexFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetHandleByIndexFunc(index, device); -} - -nvmlReturn_t (*nvmlDeviceGetMinorNumberFunc)(nvmlDevice_t device, unsigned int *minorNumber); -nvmlReturn_t nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber) { - if (nvmlDeviceGetMinorNumberFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetMinorNumberFunc(device, minorNumber); -} - -nvmlReturn_t (*nvmlDeviceGetUUIDFunc)(nvmlDevice_t device, char *uuid, unsigned int length); -nvmlReturn_t nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length) { - if (nvmlDeviceGetUUIDFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetUUIDFunc(device, uuid, length); -} - -nvmlReturn_t (*nvmlDeviceGetNameFunc)(nvmlDevice_t device, char *name, unsigned int length); -nvmlReturn_t nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length) { - if (nvmlDeviceGetNameFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetNameFunc(device, name, length); -} - -nvmlReturn_t (*nvmlDeviceGetMemoryInfoFunc)(nvmlDevice_t device, nvmlMemory_t *memory); -nvmlReturn_t nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory) { - if (nvmlDeviceGetMemoryInfoFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetMemoryInfoFunc(device, memory); -} - -nvmlReturn_t (*nvmlDeviceGetUtilizationRatesFunc)(nvmlDevice_t device, nvmlUtilization_t *utilization); -nvmlReturn_t nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization) { - if (nvmlDeviceGetUtilizationRatesFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetUtilizationRatesFunc(device, utilization); -} - -nvmlReturn_t (*nvmlDeviceGetPowerUsageFunc)(nvmlDevice_t device, unsigned int *power); -nvmlReturn_t nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power) { - if (nvmlDeviceGetPowerUsageFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetPowerUsageFunc(device, power); -} - -nvmlReturn_t (*nvmlDeviceGetTemperatureFunc)(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); 
-nvmlReturn_t nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp) { - if (nvmlDeviceGetTemperatureFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetTemperatureFunc(device, sensorType, temp); -} - -nvmlReturn_t (*nvmlDeviceGetFanSpeedFunc)(nvmlDevice_t device, unsigned int *speed); -nvmlReturn_t nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed) { - if (nvmlDeviceGetFanSpeedFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetFanSpeedFunc(device, speed); -} - -nvmlReturn_t (*nvmlDeviceGetEncoderUtilizationFunc)(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs); -nvmlReturn_t nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) { - if (nvmlDeviceGetEncoderUtilizationFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetEncoderUtilizationFunc(device, utilization, samplingPeriodUs); -} - -nvmlReturn_t (*nvmlDeviceGetDecoderUtilizationFunc)(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs); -nvmlReturn_t nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) { - if (nvmlDeviceGetDecoderUtilizationFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - return nvmlDeviceGetDecoderUtilizationFunc(device, utilization, samplingPeriodUs); -} - -nvmlReturn_t (*nvmlDeviceGetSamplesFunc)(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples); - -// Loads the "libnvidia-ml.so.1" shared library. -// Loads all symbols needed and initializes NVML. -// Call this before calling any other methods. 
-nvmlReturn_t nvmlInit_dl(void) { - nvmlHandle = dlopen("libnvidia-ml.so.1", RTLD_LAZY); - if (nvmlHandle == NULL) { - return NVML_ERROR_LIBRARY_NOT_FOUND; - } - nvmlInitFunc = dlsym(nvmlHandle, "nvmlInit_v2"); - if (nvmlInitFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlShutdownFunc = dlsym(nvmlHandle, "nvmlShutdown"); - if (nvmlShutdownFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlErrorStringFunc = dlsym(nvmlHandle, "nvmlErrorString"); - if (nvmlErrorStringFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlSystemGetDriverVersionFunc = dlsym(nvmlHandle, "nvmlSystemGetDriverVersion"); - if (nvmlSystemGetDriverVersionFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetCountFunc = dlsym(nvmlHandle, "nvmlDeviceGetCount_v2"); - if (nvmlDeviceGetCountFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetHandleByIndexFunc = dlsym(nvmlHandle, "nvmlDeviceGetHandleByIndex_v2"); - if (nvmlDeviceGetHandleByIndexFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetMinorNumberFunc = dlsym(nvmlHandle, "nvmlDeviceGetMinorNumber"); - if (nvmlDeviceGetMinorNumberFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetUUIDFunc = dlsym(nvmlHandle, "nvmlDeviceGetUUID"); - if (nvmlDeviceGetUUIDFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetNameFunc = dlsym(nvmlHandle, "nvmlDeviceGetName"); - if (nvmlDeviceGetNameFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetMemoryInfoFunc = dlsym(nvmlHandle, "nvmlDeviceGetMemoryInfo"); - if (nvmlDeviceGetMemoryInfoFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetUtilizationRatesFunc = dlsym(nvmlHandle, "nvmlDeviceGetUtilizationRates"); - if (nvmlDeviceGetUtilizationRatesFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetPowerUsageFunc = dlsym(nvmlHandle, "nvmlDeviceGetPowerUsage"); - if (nvmlDeviceGetPowerUsageFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetTemperatureFunc = dlsym(nvmlHandle, "nvmlDeviceGetTemperature"); - if (nvmlDeviceGetTemperatureFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetFanSpeedFunc = dlsym(nvmlHandle, "nvmlDeviceGetFanSpeed"); - if (nvmlDeviceGetFanSpeedFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetSamplesFunc = dlsym(nvmlHandle, "nvmlDeviceGetSamples"); - if (nvmlDeviceGetSamplesFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetEncoderUtilizationFunc = dlsym(nvmlHandle, "nvmlDeviceGetEncoderUtilization"); - if (nvmlDeviceGetEncoderUtilizationFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlDeviceGetDecoderUtilizationFunc = dlsym(nvmlHandle, "nvmlDeviceGetDecoderUtilization"); - if (nvmlDeviceGetDecoderUtilizationFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlReturn_t result = nvmlInitFunc(); - if (result != NVML_SUCCESS) { - dlclose(nvmlHandle); - nvmlHandle = NULL; - return result; - } - return NVML_SUCCESS; -} - -// Shuts down NVML and decrements the reference count on the dynamically loaded -// "libnvidia-ml.so.1" library. -// Call this once NVML is no longer being used. -nvmlReturn_t nvmlShutdown_dl(void) { - if (nvmlHandle == NULL) { - return NVML_SUCCESS; - } - if (nvmlShutdownFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - nvmlReturn_t r = nvmlShutdownFunc(); - if (r != NVML_SUCCESS) { - return r; - } - return (dlclose(nvmlHandle) ? 
NVML_ERROR_UNKNOWN : NVML_SUCCESS); -} - -// This function is here because the API provided by NVML is not very user -// friendly. This function can be used to get average utilization.gpu or -// power.draw. -// -// `device`: The identifier of the target device. -// `type`: Type of sampling event. Only NVML_TOTAL_POWER_SAMPLES and NVML_GPU_UTILIZATION_SAMPLES are supported. -// `lastSeenTimeStamp`: Return average using samples with timestamp greather than this timestamp. Unix epoch in micro seconds. -// `averageUsage`: Reference in which average is returned. -// -// In my experiments, I found that NVML_GPU_UTILIZATION_SAMPLES buffer stores -// 100 samples that are uniformly spread with ~6 samples per second. So the -// buffer stores last ~16s of data. -// NVML_TOTAL_POWER_SAMPLES buffer stores 120 samples, but in different runs I -// noticed them to be non-uniformly separated. Sometimes 120 samples only -// consisted of 10s of data and sometimes they were spread over 60s. -// -nvmlReturn_t nvmlDeviceGetAverageUsage(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, unsigned int* averageUsage) { - if (nvmlHandle == NULL) { - return NVML_ERROR_LIBRARY_NOT_FOUND; - } - if (nvmlDeviceGetSamplesFunc == NULL) { - return NVML_ERROR_FUNCTION_NOT_FOUND; - } - - // We don't really use this because both the metrics we support - // averagePowerUsage and averageGPUUtilization are unsigned int. - nvmlValueType_t sampleValType; - - // This will be set to the number of samples that can be queried. We would - // need to allocate an array of this size to store the samples. - unsigned int sampleCount; - - // Invoking this method with `samples` set to NULL sets the sampleCount. - nvmlReturn_t r = nvmlDeviceGetSamplesFunc(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, NULL); - if (r != NVML_SUCCESS) { - return r; - } - - // Allocate memory to store sampleCount samples. - // In my experiments, the sampleCount at this stage was always 120 for - // NVML_TOTAL_POWER_SAMPLES and 100 for NVML_GPU_UTILIZATION_SAMPLES - nvmlSample_t* samples = (nvmlSample_t*) malloc(sampleCount * sizeof(nvmlSample_t)); - - r = nvmlDeviceGetSamplesFunc(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples); - if (r != NVML_SUCCESS) { - free(samples); - return r; - } - - int i = 0; - unsigned int sum = 0; - for (; i < sampleCount; i++) { - sum += samples[i].sampleValue.uiVal; - } - *averageUsage = sum/sampleCount; - - free(samples); - return r; -} -*/ -import "C" - -import ( - "errors" - "fmt" - "time" -) - -const ( - szDriver = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE - szName = C.NVML_DEVICE_NAME_BUFFER_SIZE - szUUID = C.NVML_DEVICE_UUID_BUFFER_SIZE -) - -var errLibraryNotLoaded = errors.New("could not load NVML library") - -// Initialize initializes NVML. -// Call this before calling any other methods. -func Initialize() error { - return errorString(C.nvmlInit_dl()) -} - -// Shutdown shuts down NVML. -// Call this once NVML is no longer being used. -func Shutdown() error { - return errorString(C.nvmlShutdown_dl()) -} - -// errorString takes a nvmlReturn_t and converts it into a golang error. -// It uses a nvml method to convert to a user friendly error message. -func errorString(ret C.nvmlReturn_t) error { - if ret == C.NVML_SUCCESS { - return nil - } - // We need to special case this because if nvml library is not found - // nvmlErrorString() method will not work. 
- if ret == C.NVML_ERROR_LIBRARY_NOT_FOUND || C.nvmlHandle == nil { - return errLibraryNotLoaded - } - err := C.GoString(C.nvmlErrorString(ret)) - return fmt.Errorf("nvml: %v", err) -} - -// SystemDriverVersion returns the the driver version on the system. -func SystemDriverVersion() (string, error) { - if C.nvmlHandle == nil { - return "", errLibraryNotLoaded - } - var driver [szDriver]C.char - r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver) - return C.GoString(&driver[0]), errorString(r) -} - -// DeviceCount returns the number of nvidia devices on the system. -func DeviceCount() (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - var n C.uint - r := C.nvmlDeviceGetCount(&n) - return uint(n), errorString(r) -} - -// Device is the handle for the device. -// This handle is obtained by calling DeviceHandleByIndex(). -type Device struct { - dev C.nvmlDevice_t -} - -// DeviceHandleByIndex returns the device handle for a particular index. -// The indices range from 0 to DeviceCount()-1. The order in which NVML -// enumerates devices has no guarantees of consistency between reboots. -func DeviceHandleByIndex(idx uint) (Device, error) { - if C.nvmlHandle == nil { - return Device{}, errLibraryNotLoaded - } - var dev C.nvmlDevice_t - r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev) - return Device{dev}, errorString(r) -} - -// MinorNumber returns the minor number for the device. -// The minor number for the device is such that the Nvidia device node -// file for each GPU will have the form /dev/nvidia[minor number]. -func (d Device) MinorNumber() (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - var n C.uint - r := C.nvmlDeviceGetMinorNumber(d.dev, &n) - return uint(n), errorString(r) -} - -// UUID returns the globally unique immutable UUID associated with this device. -func (d Device) UUID() (string, error) { - if C.nvmlHandle == nil { - return "", errLibraryNotLoaded - } - var uuid [szUUID]C.char - r := C.nvmlDeviceGetUUID(d.dev, &uuid[0], szUUID) - return C.GoString(&uuid[0]), errorString(r) -} - -// Name returns the product name of the device. -func (d Device) Name() (string, error) { - if C.nvmlHandle == nil { - return "", errLibraryNotLoaded - } - var name [szName]C.char - r := C.nvmlDeviceGetName(d.dev, &name[0], szName) - return C.GoString(&name[0]), errorString(r) -} - -// MemoryInfo returns the total and used memory (in bytes) of the device. -func (d Device) MemoryInfo() (uint64, uint64, error) { - if C.nvmlHandle == nil { - return 0, 0, errLibraryNotLoaded - } - var memory C.nvmlMemory_t - r := C.nvmlDeviceGetMemoryInfo(d.dev, &memory) - return uint64(memory.total), uint64(memory.used), errorString(r) -} - -// UtilizationRates returns the percent of time over the past sample period during which: -// utilization.gpu: one or more kernels were executing on the GPU. -// utilization.memory: global (device) memory was being read or written. -func (d Device) UtilizationRates() (uint, uint, error) { - if C.nvmlHandle == nil { - return 0, 0, errLibraryNotLoaded - } - var utilization C.nvmlUtilization_t - r := C.nvmlDeviceGetUtilizationRates(d.dev, &utilization) - return uint(utilization.gpu), uint(utilization.memory), errorString(r) -} - -// PowerUsage returns the power usage for this GPU and its associated circuitry -// in milliwatts. The reading is accurate to within +/- 5% of current power draw. 
-func (d Device) PowerUsage() (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - var n C.uint - r := C.nvmlDeviceGetPowerUsage(d.dev, &n) - return uint(n), errorString(r) -} - -// AveragePowerUsage returns the power usage for this GPU and its associated circuitry -// in milliwatts averaged over the samples collected in the last `since` duration. -func (d Device) AveragePowerUsage(since time.Duration) (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - lastTs := C.ulonglong(time.Now().Add(-1*since).UnixNano() / 1000) - var n C.uint - r := C.nvmlDeviceGetAverageUsage(d.dev, C.NVML_TOTAL_POWER_SAMPLES, lastTs, &n) - return uint(n), errorString(r) -} - -// AverageGPUUtilization returns the utilization.gpu metric (percent of time -// one of more kernels were executing on the GPU) averaged over the samples -// collected in the last `since` duration. -func (d Device) AverageGPUUtilization(since time.Duration) (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - lastTs := C.ulonglong(time.Now().Add(-1*since).UnixNano() / 1000) - var n C.uint - r := C.nvmlDeviceGetAverageUsage(d.dev, C.NVML_GPU_UTILIZATION_SAMPLES, lastTs, &n) - return uint(n), errorString(r) -} - -// Temperature returns the temperature for this GPU in Celsius. -func (d Device) Temperature() (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - var n C.uint - r := C.nvmlDeviceGetTemperature(d.dev, C.NVML_TEMPERATURE_GPU, &n) - return uint(n), errorString(r) -} - -// FanSpeed returns the temperature for this GPU in the percentage of its full -// speed, with 100 being the maximum. -func (d Device) FanSpeed() (uint, error) { - if C.nvmlHandle == nil { - return 0, errLibraryNotLoaded - } - var n C.uint - r := C.nvmlDeviceGetFanSpeed(d.dev, &n) - return uint(n), errorString(r) -} - -// EncoderUtilization returns the percent of time over the last sample period during which the GPU video encoder was being used. -// The sampling period is variable and is returned in the second return argument in microseconds. -func (d Device) EncoderUtilization() (uint, uint, error) { - if C.nvmlHandle == nil { - return 0, 0, errLibraryNotLoaded - } - var n C.uint - var sp C.uint - r := C.nvmlDeviceGetEncoderUtilization(d.dev, &n, &sp) - return uint(n), uint(sp), errorString(r) -} - -// DecoderUtilization returns the percent of time over the last sample period during which the GPU video decoder was being used. -// The sampling period is variable and is returned in the second return argument in microseconds. -func (d Device) DecoderUtilization() (uint, uint, error) { - if C.nvmlHandle == nil { - return 0, 0, errLibraryNotLoaded - } - var n C.uint - var sp C.uint - r := C.nvmlDeviceGetDecoderUtilization(d.dev, &n, &sp) - return uint(n), uint(sp), errorString(r) -} diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go deleted file mode 100644 index ddbec565077f..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/bindings_nocgo.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build !cgo - -package gonvml - -import ( - "errors" - "time" -) - -var errNoCgo = errors.New("this binary is built without CGO, NVML is disabled") - -// Initialize initializes NVML. -// Call this before calling any other methods. -func Initialize() error { - return errNoCgo -} - -// Shutdown shuts down NVML. 
-// Call this once NVML is no longer being used. -func Shutdown() error { - return errNoCgo -} - -// SystemDriverVersion returns the the driver version on the system. -func SystemDriverVersion() (string, error) { - return "", errNoCgo -} - -// DeviceCount returns the number of nvidia devices on the system. -func DeviceCount() (uint, error) { - return 0, errNoCgo -} - -// Device is the handle for the device. -// This handle is obtained by calling DeviceHandleByIndex(). -type Device struct { -} - -// DeviceHandleByIndex returns the device handle for a particular index. -// The indices range from 0 to DeviceCount()-1. The order in which NVML -// enumerates devices has no guarantees of consistency between reboots. -func DeviceHandleByIndex(idx uint) (Device, error) { - return Device{}, errNoCgo -} - -// MinorNumber returns the minor number for the device. -// The minor number for the device is such that the Nvidia device node -// file for each GPU will have the form /dev/nvidia[minor number]. -func (d Device) MinorNumber() (uint, error) { - return 0, errNoCgo -} - -// UUID returns the globally unique immutable UUID associated with this device. -func (d Device) UUID() (string, error) { - return "", errNoCgo -} - -// Name returns the product name of the device. -func (d Device) Name() (string, error) { - return "", errNoCgo -} - -// MemoryInfo returns the total and used memory (in bytes) of the device. -func (d Device) MemoryInfo() (uint64, uint64, error) { - return 0, 0, errNoCgo -} - -// UtilizationRates returns the percent of time over the past sample period during which: -// utilization.gpu: one or more kernels were executing on the GPU. -// utilizatoin.memory: global (device) memory was being read or written. -func (d Device) UtilizationRates() (uint, uint, error) { - return 0, 0, errNoCgo -} - -// PowerUsage returns the power usage for this GPU and its associated circuitry -// in milliwatts. The reading is accurate to within +/- 5% of current power draw. -func (d Device) PowerUsage() (uint, error) { - return 0, errNoCgo -} - -// AveragePowerUsage returns the power usage for this GPU and its associated circuitry -// in milliwatts averaged over the samples collected in the last `since` duration. -func (d Device) AveragePowerUsage(since time.Duration) (uint, error) { - return 0, errNoCgo -} - -// AverageGPUUtilization returns the utilization.gpu metric (percent of time -// one of more kernels were executing on the GPU) averaged over the samples -// collected in the last `since` duration. -func (d Device) AverageGPUUtilization(since time.Duration) (uint, error) { - return 0, errNoCgo -} - -// Temperature returns the temperature for this GPU in Celsius. -func (d Device) Temperature() (uint, error) { - return 0, errNoCgo -} - -// FanSpeed returns the temperature for this GPU in the percentage of its full -// speed, with 100 being the maximum. -func (d Device) FanSpeed() (uint, error) { - return 0, errNoCgo -} - -// EncoderUtilization returns the percent of time over the last sample period during which the GPU video encoder was being used. -// The sampling period is variable and is returned in the second return argument in microseconds. -func (d Device) EncoderUtilization() (uint, uint, error) { - return 0, 0, errNoCgo -} - -// DecoderUtilization returns the percent of time over the last sample period during which the GPU video decoder was being used. -// The sampling period is variable and is returned in the second return argument in microseconds. 
-func (d Device) DecoderUtilization() (uint, uint, error) { - return 0, 0, errNoCgo -} diff --git a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/nvml.h b/cluster-autoscaler/vendor/github.com/mindprince/gonvml/nvml.h deleted file mode 100644 index 6febac22b602..000000000000 --- a/cluster-autoscaler/vendor/github.com/mindprince/gonvml/nvml.h +++ /dev/null @@ -1,5605 +0,0 @@ -/* - * Copyright 1993-2016 NVIDIA Corporation. All rights reserved. - * - * NOTICE TO USER: - * - * This source code is subject to NVIDIA ownership rights under U.S. and - * international Copyright laws. Users and possessors of this source code - * are hereby granted a nonexclusive, royalty-free license to use this code - * in individual and commercial software. - * - * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE - * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR - * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH - * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. - * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, - * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS - * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE - * OR PERFORMANCE OF THIS SOURCE CODE. - * - * U.S. Government End Users. This source code is a "commercial item" as - * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of - * "commercial computer software" and "commercial computer software - * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) - * and is provided to the U.S. Government only as a commercial end item. - * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through - * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the - * source code with only those rights set forth herein. - * - * Any use of this source code in individual and commercial software must - * include, in the user documentation and internal comments to the code, - * the above Disclaimer and U.S. Government End Users Notice. - */ - -/* -NVML API Reference - -The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and -managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building -3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi -tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. - -API Documentation - -Supported platforms: -- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit -- Linux: 32-bit and 64-bit -- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 - -Supported products: -- Full Support - - All Tesla products, starting with the Fermi architecture - - All Quadro products, starting with the Fermi architecture - - All GRID products, starting with the Kepler architecture - - Selected GeForce Titan products -- Limited Support - - All Geforce products, starting with the Fermi architecture - -The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is -not be added to the system path by default. To dynamically link to NVML, add this path to the PATH -environmental variable. 
To dynamically load NVML, call LoadLibrary with this path. - -On Linux the NVML library will be found on the standard library path. For 64 bit Linux, both the 32 bit -and 64 bit NVML libraries will be installed. - -Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html -*/ - -#ifndef __nvml_nvml_h__ -#define __nvml_nvml_h__ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * On Windows, set up methods for DLL export - * define NVML_STATIC_IMPORT when using nvml_loader library - */ -#if defined _WINDOWS - #if !defined NVML_STATIC_IMPORT - #if defined NVML_LIB_EXPORT - #define DECLDIR __declspec(dllexport) - #else - #define DECLDIR __declspec(dllimport) - #endif - #else - #define DECLDIR - #endif -#else - #define DECLDIR -#endif - -/** - * NVML API versioning support - */ -#define NVML_API_VERSION 8 -#define NVML_API_VERSION_STR "8" -#define nvmlInit nvmlInit_v2 -#define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v2 -#define nvmlDeviceGetCount nvmlDeviceGetCount_v2 -#define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 -#define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceStructs Device Structs - * @{ - */ -/***************************************************************************************************/ - -/** - * Special constant that some fields take when they are not available. - * Used when only part of the struct is not available. - * - * Each structure explicitly states when to check for this value. - */ -#define NVML_VALUE_NOT_AVAILABLE (-1) - -typedef struct nvmlDevice_st* nvmlDevice_t; - -/** - * Buffer size guaranteed to be large enough for pci bus id - */ -#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 16 - -/** - * PCI information about a GPU device. - */ -typedef struct nvmlPciInfo_st -{ - char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) - unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffff - unsigned int bus; //!< The bus on which the device resides, 0 to 0xff - unsigned int device; //!< The device's id on the bus, 0 to 31 - unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id - - // Added in NVML 2.285 API - unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID - - // NVIDIA reserved for internal use only - unsigned int reserved0; - unsigned int reserved1; - unsigned int reserved2; - unsigned int reserved3; -} nvmlPciInfo_t; - -/** - * Detailed ECC error counts for a device. - * - * @deprecated Different GPU families can have different memory error counters - * See \ref nvmlDeviceGetMemoryErrorCounter - */ -typedef struct nvmlEccErrorCounts_st -{ - unsigned long long l1Cache; //!< L1 cache errors - unsigned long long l2Cache; //!< L2 cache errors - unsigned long long deviceMemory; //!< Device memory errors - unsigned long long registerFile; //!< Register file errors -} nvmlEccErrorCounts_t; - -/** - * Utilization information for a device. - * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. 
- */ -typedef struct nvmlUtilization_st -{ - unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU - unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written -} nvmlUtilization_t; - -/** - * Memory allocation information for a device. - */ -typedef struct nvmlMemory_st -{ - unsigned long long total; //!< Total installed FB memory (in bytes) - unsigned long long free; //!< Unallocated FB memory (in bytes) - unsigned long long used; //!< Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping -} nvmlMemory_t; - -/** - * BAR1 Memory allocation Information for a device - */ -typedef struct nvmlBAR1Memory_st -{ - unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) - unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) - unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) -}nvmlBAR1Memory_t; - -/** - * Information about running compute processes on the GPU - */ -typedef struct nvmlProcessInfo_st -{ - unsigned int pid; //!< Process ID - unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. - //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported - //! because Windows KMD manages all the memory and not the NVIDIA driver -} nvmlProcessInfo_t; - -/** - * Enum to represent type of bridge chip - */ -typedef enum nvmlBridgeChipType_enum -{ - NVML_BRIDGE_CHIP_PLX = 0, - NVML_BRIDGE_CHIP_BRO4 = 1 -}nvmlBridgeChipType_t; - -/** - * Maximum number of NvLink links supported - */ -#define NVML_NVLINK_MAX_LINKS 6 - -/** - * Enum to represent the NvLink utilization counter packet units - */ -typedef enum nvmlNvLinkUtilizationCountUnits_enum -{ - NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles - NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets - NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes - - // this must be last - NVML_NVLINK_COUNTER_UNIT_COUNT -} nvmlNvLinkUtilizationCountUnits_t; - -/** - * Enum to represent the NvLink utilization counter packet types to count - * ** this is ONLY applicable with the units as packets or bytes - * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t - * ** all packet filter descriptions are target GPU centric - * ** these can be "OR'd" together - */ -typedef enum nvmlNvLinkUtilizationCountPktTypes_enum -{ - NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets - NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets - NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets - NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests - NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data - NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data - NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets -} nvmlNvLinkUtilizationCountPktTypes_t; - -/** - * Struct to define the NVLINK counter controls - */ -typedef struct nvmlNvLinkUtilizationControl_st -{ - nvmlNvLinkUtilizationCountUnits_t units; - nvmlNvLinkUtilizationCountPktTypes_t pktfilter; -} nvmlNvLinkUtilizationControl_t; - -/** - * Enum to represent NvLink queryable capabilities - */ -typedef enum nvmlNvLinkCapability_enum -{ - NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported - 
NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported - NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported - NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported - NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link - NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device - // should be last - NVML_NVLINK_CAP_COUNT -} nvmlNvLinkCapability_t; - -/** - * Enum to represent NvLink queryable error counters - */ -typedef enum nvmlNvLinkErrorCounter_enum -{ - NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter - NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter - NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter - NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter - - // this must be last - NVML_NVLINK_ERROR_COUNT -} nvmlNvLinkErrorCounter_t; - -/** - * Represents level relationships within a system between two GPUs - * The enums are spaced to allow for future relationships - */ -typedef enum nvmlGpuLevel_enum -{ - NVML_TOPOLOGY_INTERNAL = 0, // e.g. Tesla K80 - NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch - NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge - NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge - NVML_TOPOLOGY_CPU = 40, // all devices that are connected to the same CPU but possibly multiple host bridges - NVML_TOPOLOGY_SYSTEM = 50, // all devices in the system - - // there is purposefully no COUNT here because of the need for spacing above -} nvmlGpuTopologyLevel_t; - -/* P2P Capability Index Status*/ -typedef enum nvmlGpuP2PStatus_enum -{ - NVML_P2P_STATUS_OK = 0, - NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, - NVML_P2P_STATUS_GPU_NOT_SUPPORTED, - NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, - NVML_P2P_STATUS_DISABLED_BY_REGKEY, - NVML_P2P_STATUS_NOT_SUPPORTED, - NVML_P2P_STATUS_UNKNOWN - -} nvmlGpuP2PStatus_t; - -/* P2P Capability Index*/ -typedef enum nvmlGpuP2PCapsIndex_enum -{ - NVML_P2P_CAPS_INDEX_READ = 0, - NVML_P2P_CAPS_INDEX_WRITE, - NVML_P2P_CAPS_INDEX_NVLINK, - NVML_P2P_CAPS_INDEX_ATOMICS, - NVML_P2P_CAPS_INDEX_PROP, - NVML_P2P_CAPS_INDEX_UNKNOWN -}nvmlGpuP2PCapsIndex_t; - -/** - * Maximum limit on Physical Bridges per Board - */ -#define NVML_MAX_PHYSICAL_BRIDGE (128) - -/** - * Information about the Bridge Chip Firmware - */ -typedef struct nvmlBridgeChipInfo_st -{ - nvmlBridgeChipType_t type; //!< Type of Bridge Chip - unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable -}nvmlBridgeChipInfo_t; - -/** - * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate - * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. 
- */ -typedef struct nvmlBridgeChipHierarchy_st -{ - unsigned char bridgeCount; //!< Number of Bridge Chips on the Board - nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board -}nvmlBridgeChipHierarchy_t; - -/** - * Represents Type of Sampling Event - */ -typedef enum nvmlSamplingType_enum -{ - NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU - NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU - NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written - NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy - NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy - NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples - NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples - - // Keep this last - NVML_SAMPLINGTYPE_COUNT -}nvmlSamplingType_t; - -/** - * Represents the queryable PCIe utilization counters - */ -typedef enum nvmlPcieUtilCounter_enum -{ - NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity - NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity - - // Keep this last - NVML_PCIE_UTIL_COUNT -} nvmlPcieUtilCounter_t; - -/** - * Represents the type for sample value returned - */ -typedef enum nvmlValueType_enum -{ - NVML_VALUE_TYPE_DOUBLE = 0, - NVML_VALUE_TYPE_UNSIGNED_INT = 1, - NVML_VALUE_TYPE_UNSIGNED_LONG = 2, - NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, - NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, - - // Keep this last - NVML_VALUE_TYPE_COUNT -}nvmlValueType_t; - - -/** - * Union to represent different types of Value - */ -typedef union nvmlValue_st -{ - double dVal; //!< If the value is double - unsigned int uiVal; //!< If the value is unsigned int - unsigned long ulVal; //!< If the value is unsigned long - unsigned long long ullVal; //!< If the value is unsigned long long - signed long long sllVal; //!< If the value is signed long long -}nvmlValue_t; - -/** - * Information for Sample - */ -typedef struct nvmlSample_st -{ - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t sampleValue; //!< Sample Value -}nvmlSample_t; - -/** - * Represents type of perf policy for which violation times can be queried - */ -typedef enum nvmlPerfPolicyType_enum -{ - NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks - NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks - NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks - NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks - NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks - NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks - - NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) - NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks - - // Keep this last - NVML_PERF_POLICY_COUNT -}nvmlPerfPolicyType_t; - -/** - * Struct to hold perf policy violation status data - */ -typedef struct nvmlViolationTime_st 
-{ - unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds - unsigned long long violationTime; //!< violationTime in Nanoseconds -}nvmlViolationTime_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceEnumvs Device Enums - * @{ - */ -/***************************************************************************************************/ - -/** - * Generic enable/disable enum. - */ -typedef enum nvmlEnableState_enum -{ - NVML_FEATURE_DISABLED = 0, //!< Feature disabled - NVML_FEATURE_ENABLED = 1 //!< Feature enabled -} nvmlEnableState_t; - -//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. -#define nvmlFlagDefault 0x00 -//! Generic flag used to force some behavior. See description of particular functions for details. -#define nvmlFlagForce 0x01 - -/** - * * The Brand of the GPU - * */ -typedef enum nvmlBrandType_enum -{ - NVML_BRAND_UNKNOWN = 0, - NVML_BRAND_QUADRO = 1, - NVML_BRAND_TESLA = 2, - NVML_BRAND_NVS = 3, - NVML_BRAND_GRID = 4, - NVML_BRAND_GEFORCE = 5, - - // Keep this last - NVML_BRAND_COUNT -} nvmlBrandType_t; - -/** - * Temperature thresholds. - */ -typedef enum nvmlTemperatureThresholds_enum -{ - NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will shut down - // for HW protection - NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will begin slowdown - // Keep this last - NVML_TEMPERATURE_THRESHOLD_COUNT -} nvmlTemperatureThresholds_t; - -/** - * Temperature sensors. - */ -typedef enum nvmlTemperatureSensors_enum -{ - NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die - - // Keep this last - NVML_TEMPERATURE_COUNT -} nvmlTemperatureSensors_t; - -/** - * Compute mode. - * - * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. - * Earlier CUDA versions supported a single exclusive mode, - * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. - */ -typedef enum nvmlComputeMode_enum -{ - NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed - NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time - - // Keep this last - NVML_COMPUTEMODE_COUNT -} nvmlComputeMode_t; - -/** - * ECC bit types. 
- * - * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type - */ -#define nvmlEccBitType_t nvmlMemoryErrorType_t - -/** - * Single bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED - */ -#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED - -/** - * Double bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED - */ -#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED - -/** - * Memory error types - */ -typedef enum nvmlMemoryErrorType_enum -{ - /** - * A memory error that was corrected - * - * For ECC errors, these are single bit errors - * For Texture memory, these are errors fixed by resend - */ - NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, - /** - * A memory error that was not corrected - * - * For ECC errors, these are double bit errors - * For Texture memory, these are errors where the resend fails - */ - NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, - - - // Keep this last - NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types - -} nvmlMemoryErrorType_t; - -/** - * ECC counter types. - * - * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. - * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver - * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app - * is run. - */ -typedef enum nvmlEccCounterType_enum -{ - NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. - NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) - - // Keep this last - NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types -} nvmlEccCounterType_t; - -/** - * Clock types. - * - * All speeds are in Mhz. - */ -typedef enum nvmlClockType_enum -{ - NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain - NVML_CLOCK_SM = 1, //!< SM clock domain - NVML_CLOCK_MEM = 2, //!< Memory clock domain - NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain - - // Keep this last - NVML_CLOCK_COUNT //usedGpuMemory is not supported - - - unsigned long long time; //!< Amount of time in ms during which the compute context was active. 
The time is reported as 0 if - //!< the process is not terminated - - unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process - - unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) - - unsigned int reserved[5]; //!< Reserved for future use -} nvmlAccountingStats_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuConstants Vgpu Constants - * @{ - */ -/***************************************************************************************************/ - -/** - * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense - */ -#define NVML_GRID_LICENSE_BUFFER_SIZE 128 - -#define NVML_VGPU_NAME_BUFFER_SIZE 64 - -#define NVML_MAX_VGPU_TYPES_PER_PGPU 17 - -#define NVML_MAX_VGPU_INSTANCES_PER_PGPU 24 - -#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 - -#define NVML_GRID_LICENSE_INFO_MAX_LENGTH 128 - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuEnum Vgpu Enum - * @{ - */ -/***************************************************************************************************/ - -/*! - * Types of VM identifiers - */ -typedef enum nvmlVgpuVmIdType { - NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID - NVML_VGPU_VM_ID_UUID = 1, //!< VM ID represents UUID -} nvmlVgpuVmIdType_t; - -// vGPU GUEST info state. -typedef enum nvmlVgpuGuestInfoState_enum -{ - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //= 0 and < \a unitCount - * @param unit Reference in which to return the unit handle - * - * @return - * - \ref NVML_SUCCESS if \a unit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); - -/** - * Retrieves the static information associated with a unit. - * - * For S-class products. - * - * See \ref nvmlUnitInfo_t for details on available unit info. - * - * @param unit The identifier of the target unit - * @param info Reference in which to return the unit information - * - * @return - * - \ref NVML_SUCCESS if \a info has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL - */ -nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); - -/** - * Retrieves the LED state associated with this unit. - * - * For S-class products. - * - * See \ref nvmlLedState_t for details on allowed states. - * - * @param unit The identifier of the target unit - * @param state Reference in which to return the current LED state - * - * @return - * - \ref NVML_SUCCESS if \a state has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitSetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); - -/** - * Retrieves the PSU stats for the unit. - * - * For S-class products. 
- * - * See \ref nvmlPSUInfo_t for details on available PSU info. - * - * @param unit The identifier of the target unit - * @param psu Reference in which to return the PSU information - * - * @return - * - \ref NVML_SUCCESS if \a psu has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); - -/** - * Retrieves the temperature readings for the unit, in degrees C. - * - * For S-class products. - * - * Depending on the product, readings may be available for intake (type=0), - * exhaust (type=1) and board (type=2). - * - * @param unit The identifier of the target unit - * @param type The type of reading to take - * @param temp Reference in which to return the intake temperature - * - * @return - * - \ref NVML_SUCCESS if \a temp has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); - -/** - * Retrieves the fan speed readings for the unit. - * - * For S-class products. - * - * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. - * - * @param unit The identifier of the target unit - * @param fanSpeeds Reference in which to return the fan speed information - * - * @return - * - \ref NVML_SUCCESS if \a fanSpeeds has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); - -/** - * Retrieves the set of GPU devices that are attached to the specified unit. - * - * For S-class products. - * - * The \a deviceCount argument is expected to be set to the size of the input \a devices array. - * - * @param unit The identifier of the target unit - * @param deviceCount Reference in which to provide the \a devices array size, and - * to return the number of attached GPU devices - * @param devices Reference in which to return the references to the attached GPU devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); - -/** - * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. - * - * For S-class products. 
- * - * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. - * The HIC must be connected to an S-class system for it to be reported by this function. - * - * @param hwbcCount Size of hwbcEntries array - * @param hwbcEntries Array holding information about hwbc - * - * @return - * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceQueries Device Queries - * This chapter describes that queries that NVML can perform against each device. - * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by - * calling one of \ref nvmlDeviceGetHandleByIndex(), \ref nvmlDeviceGetHandleBySerial(), - * \ref nvmlDeviceGetHandleByPciBusId(). or \ref nvmlDeviceGetHandleByUUID(). - * @{ - */ -/***************************************************************************************************/ - - /** - * Retrieves the number of compute devices in the system. A compute device is a single GPU. - * - * For all products. - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. - * Update your code to handle this error, or use NVML 4.304 or older nvml header file. - * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * @param deviceCount Reference in which to return the number of accessible devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount); - -/** - * Acquire the handle for a particular device, based on its index. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or UUID. See - * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. 
- * Update your code to handle this error, or use NVML 4.304 or older nvml header file. - * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. - * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't - * need to worry about that. - * - * @param index The index of the target GPU, >= 0 and < \a accessibleDevices - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetIndex - * @see nvmlDeviceGetCount - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its board serial number. - * - * For Fermi &tm; or newer fully supported devices. - * - * This number corresponds to the value printed directly on the board, and to the value returned by - * \ref nvmlDeviceGetSerial(). - * - * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor - * of \ref nvmlDeviceGetHandleByUUID. - * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @param serial The board serial number of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one - * device has the same serial (dual GPU boards) - * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetSerial - * @see nvmlDeviceGetHandleByUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. - * - * For all products. 
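/*
 * Usage sketch (illustrative only): enumerating devices with nvmlDeviceGetCount() and
 * nvmlDeviceGetHandleByIndex(), skipping indices the caller has no permission for, as the
 * note above recommends. Assumes nvmlInit(), nvmlShutdown() and nvmlErrorString() are
 * declared elsewhere in this header; the helper name is hypothetical.
 */
#include <stdio.h>

static void listAccessibleDevices(void)
{
    unsigned int deviceCount = 0;

    if (nvmlInit() != NVML_SUCCESS)
        return;

    if (nvmlDeviceGetCount(&deviceCount) == NVML_SUCCESS)
    {
        for (unsigned int i = 0; i < deviceCount; i++)
        {
            nvmlDevice_t device;
            nvmlReturn_t st = nvmlDeviceGetHandleByIndex(i, &device);

            if (st == NVML_ERROR_NO_PERMISSION)
                continue;                          /* counted, but not accessible to this user */
            if (st != NVML_SUCCESS)
            {
                printf("index %u: %s\n", i, nvmlErrorString(st));
                continue;
            }
            printf("index %u: handle acquired\n", i);
        }
    }
    nvmlShutdown();
}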
- * - * @param uuid The UUID of the target GPU - * @param device Reference in which to return the device handle - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null - * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its PCI bus id. - * - * For all products. - * - * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo(). - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND - * instead of NVML_ERROR_NO_PERMISSION. - * - * @param pciBusId The PCI bus id of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device); - -/** - * Retrieves the name of this device. - * - * For all products. - * - * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not - * exceed 64 characters in length (including the NULL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. 
- * - * @param device The identifier of the target device - * @param name Reference in which to return the product name - * @param length The maximum allowed length of the string returned in \a name - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); - -/** - * Retrieves the brand of this device. - * - * For all products. - * - * The type is a member of \ref nvmlBrandType_t defined above. - * - * @param device The identifier of the target device - * @param type Reference in which to return the product brand type - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); - -/** - * Retrieves the NVML index of this device. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or GPU UUID. See - * \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * @param device The identifier of the target device - * @param index Reference in which to return the NVML index of the device - * - * @return - * - \ref NVML_SUCCESS if \a index has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetHandleByIndex() - * @see nvmlDeviceGetCount() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); - -/** - * Retrieves the globally unique board serial number associated with this device's board. - * - * For all products with an inforom. - * - * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). - * This number matches the serial number tag that is physically attached to the board. See \ref - * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. 
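/*
 * Usage sketch (illustrative only): reading the product name, brand and NVML index for a
 * device handle obtained as above. NVML_DEVICE_NAME_BUFFER_SIZE is the buffer-size constant
 * referenced in the nvmlDeviceGetName() description; the helper name is hypothetical.
 */
#include <stdio.h>

static void printIdentity(nvmlDevice_t device)
{
    char            name[NVML_DEVICE_NAME_BUFFER_SIZE];
    nvmlBrandType_t brand;
    unsigned int    nvmlIndex;

    if (nvmlDeviceGetName(device, name, (unsigned int)sizeof(name)) == NVML_SUCCESS)
        printf("name : %s\n", name);
    if (nvmlDeviceGetBrand(device, &brand) == NVML_SUCCESS)
        printf("brand: %d\n", (int)brand);
    if (nvmlDeviceGetIndex(device, &nvmlIndex) == NVML_SUCCESS)
        printf("index: %u\n", nvmlIndex);
}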
- * - * @param device The identifier of the target device - * @param serial Reference in which to return the board/module serial number - * @param length The maximum allowed length of the string returned in \a serial - * - * @return - * - \ref NVML_SUCCESS if \a serial has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); - -/** - * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device - * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, - * result[0] = 0x3, result[1] = 0x3 - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * @param cpuSetSize The size of the cpuSet array that is safe to access - * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per - * unsigned long on 64-bit machines, 32 on 32-bit machines - * - * @return - * - \ref NVML_SUCCESS if \a cpuAffinity has been filled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); - -/** - * Sets the ideal affinity for the calling thread and device using the guidelines - * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. - * Older versions set the affinity for a calling process and all children. - * Currently supports up to 64 processors. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully bound - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); - -/** - * Clear all affinity bindings for the calling thread. Note, this is a change as of version - * 8.0 as older versions cleared the affinity for a calling process and all children. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. 
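/*
 * Usage sketch (illustrative only): querying the board serial number and the ideal CPU
 * affinity mask, then binding the calling thread with nvmlDeviceSetCpuAffinity() (Linux
 * only, per the descriptions above). The cpuSet size of two unsigned longs is an arbitrary
 * choice covering 128 CPUs on a 64-bit machine; the helper name is hypothetical.
 */
#include <stdio.h>

static void bindThreadNearDevice(nvmlDevice_t device)
{
    char          serial[NVML_DEVICE_SERIAL_BUFFER_SIZE];
    unsigned long cpuSet[2] = { 0, 0 };

    if (nvmlDeviceGetSerial(device, serial, (unsigned int)sizeof(serial)) == NVML_SUCCESS)
        printf("serial: %s\n", serial);

    if (nvmlDeviceGetCpuAffinity(device, 2, cpuSet) == NVML_SUCCESS)
        printf("ideal CPU mask: 0x%lx 0x%lx\n", cpuSet[1], cpuSet[0]);

    /* Bind the calling thread to the ideal CPUs for this device. */
    if (nvmlDeviceSetCpuAffinity(device) != NVML_SUCCESS)
        printf("could not set CPU affinity\n");
}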
- * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully unbound - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); - -/** - * Retrieve the common ancestor for two devices - * For all products. - * Supported on Linux only. - * - * @param device1 The identifier of the first device - * @param device2 The identifier of the second device - * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type - * - * @return - * - \ref NVML_SUCCESS if \a pathInfo has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); - -/** - * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level - * For all products. - * Supported on Linux only. - * - * @param device The identifier of the first device - * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. - * @param deviceArray An array of device handles for GPUs found at \a level - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the set of GPUs that have a CPU affinity with the given CPU number - * For all products. - * Supported on Linux only. - * - * @param cpuNumber The CPU number - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. 
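/*
 * Usage sketch (illustrative only): the sizing pattern described above for
 * nvmlDeviceGetTopologyNearestGpus() -- a first call with *count == 0 to learn how many
 * handles to allocate, then a second call to fill the array. Passing NULL for the array on
 * the sizing call is an assumption consistent with the parameter description; the helper
 * name is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

static void listNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level)
{
    unsigned int  count = 0;
    nvmlDevice_t *peers;

    /* First pass: count == 0 only asks for the number of matching GPUs. */
    if (nvmlDeviceGetTopologyNearestGpus(device, level, &count, NULL) != NVML_SUCCESS || count == 0)
        return;

    peers = (nvmlDevice_t *)malloc(count * sizeof(*peers));
    if (peers == NULL)
        return;

    /* Second pass: fill the array with count device handles. */
    if (nvmlDeviceGetTopologyNearestGpus(device, level, &count, peers) == NVML_SUCCESS)
        printf("%u GPU(s) found at the requested topology level\n", count);

    free(peers);
}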
- * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the status for a given p2p capability index between a given pair of GPU - * - * @param device1 The first device - * @param device2 The second device - * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 - * @param p2pStatus Reference in which to return the status of the \a p2pIndex - * between \a device1 and \a device2 - * @return - * - \ref NVML_SUCCESS if \a p2pStatus has been populated - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); - -/** - * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, - * that augments the immutable, board serial identifier. - * - * For all products. - * - * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. - * It does NOT correspond to any identifier printed on the board. It will not exceed 80 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param uuid Reference in which to return the GPU UUID - * @param length The maximum allowed length of the string returned in \a uuid - * - * @return - * - \ref NVML_SUCCESS if \a uuid has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); - -/** - * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for - * each GPU will have the form /dev/nvidia[minor number]. - * - * For all products. 
- * Supported only for Linux - * - * @param device The identifier of the target device - * @param minorNumber Reference in which to return the minor number for the device - * @return - * - \ref NVML_SUCCESS if the minor number is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber); - -/** - * Retrieves the the device board part number which is programmed into the board's InfoROM - * - * For all products. - * - * @param device Identifier of the target device - * @param partNumber Reference to the buffer to return - * @param length Length of the buffer reference - * - * @return - * - \ref NVML_SUCCESS if \a partNumber has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a serial is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length); - -/** - * Retrieves the version information for the device's infoROM object. - * - * For all products with an inforom. - * - * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate - * ECC counts. The version of the data structures in this memory may change from time to time. It will not - * exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * See \ref nvmlInforomObject_t for details on the available infoROM objects. - * - * @param device The identifier of the target device - * @param object The target infoROM object - * @param version Reference in which to return the infoROM version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomImageVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length); - -/** - * Retrieves the global infoROM image version - * - * For all products with an inforom. - * - * Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board - * in contrast to infoROM object version which is only an indicator of supported features. 
- * Version string will not exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param version Reference in which to return the infoROM image version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Retrieves the checksum of the configuration stored in the device's infoROM. - * - * For all products with an inforom. - * - * Can be used to make sure that two GPUs have the exact same configuration. - * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. - * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) - * - * @param device The identifier of the target device - * @param checksum Reference in which to return the infoROM configuration checksum - * - * @return - * - \ref NVML_SUCCESS if \a checksum has been set - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); - -/** - * Reads the infoROM from the flash and verifies the checksums. - * - * For all products with an inforom. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if infoROM is not corrupted - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); - -/** - * Retrieves the display mode for the device. - * - * For all products. - * - * This method indicates whether a physical display (e.g. monitor) is currently connected to - * any of the device's connectors. - * - * See \ref nvmlEnableState_t for details on allowed modes. 
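/*
 * Usage sketch (illustrative only): using nvmlDeviceGetInforomConfigurationChecksum() as
 * suggested above to check whether two boards carry the same infoROM configuration, after
 * validating each infoROM with nvmlDeviceValidateInforom(). The helper name is hypothetical.
 */
static int haveSameInforomConfig(nvmlDevice_t a, nvmlDevice_t b)
{
    unsigned int sumA = 0, sumB = 0;

    if (nvmlDeviceValidateInforom(a) != NVML_SUCCESS ||
        nvmlDeviceValidateInforom(b) != NVML_SUCCESS)
        return 0;   /* corrupted infoROM or feature not supported */

    if (nvmlDeviceGetInforomConfigurationChecksum(a, &sumA) != NVML_SUCCESS ||
        nvmlDeviceGetInforomConfigurationChecksum(b, &sumB) != NVML_SUCCESS)
        return 0;

    return sumA == sumB;
}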
- * - * @param device The identifier of the target device - * @param display Reference in which to return the display mode - * - * @return - * - \ref NVML_SUCCESS if \a display has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); - -/** - * Retrieves the display active state for the device. - * - * For all products. - * - * This method indicates whether a display is initialized on the device. - * For example whether X Server is attached to this device and has allocated memory for the screen. - * - * Display can be active even when no monitor is physically attached. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param isActive Reference in which to return the display active state - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); - -/** - * Retrieves the persistence mode associated with this device. - * - * For all products. - * For Linux only. - * - * When driver persistence mode is enabled the driver software state is not torn down when the last - * client disconnects. By default this feature is disabled. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current driver persistence mode - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); - -/** - * Retrieves the PCI attributes of this device. - * - * For all products. - * - * See \ref nvmlPciInfo_t for details on the available PCI info. 
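/*
 * Usage sketch (illustrative only): reading the display and persistence modes described
 * above; each query reports an nvmlEnableState_t value. The helper name is hypothetical.
 */
#include <stdio.h>

static void printModes(nvmlDevice_t device)
{
    nvmlEnableState_t display, active, persistence;

    if (nvmlDeviceGetDisplayMode(device, &display) == NVML_SUCCESS)
        printf("display connected : %s\n", display == NVML_FEATURE_ENABLED ? "yes" : "no");
    if (nvmlDeviceGetDisplayActive(device, &active) == NVML_SUCCESS)
        printf("display active    : %s\n", active == NVML_FEATURE_ENABLED ? "yes" : "no");
    if (nvmlDeviceGetPersistenceMode(device, &persistence) == NVML_SUCCESS)
        printf("persistence mode  : %s\n", persistence == NVML_FEATURE_ENABLED ? "on" : "off");
}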
- * - * @param device The identifier of the target device - * @param pci Reference in which to return the PCI info - * - * @return - * - \ref NVML_SUCCESS if \a pci has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci); - -/** - * Retrieves the maximum PCIe link generation possible with this device and system - * - * I.E. for a generation 2 PCIe device attached to a generation 1 PCIe bus the max link generation this function will - * report is generation 1. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkGen Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); - -/** - * Retrieves the maximum PCIe link width possible with this device and system - * - * I.E. for a device with a 16x PCIe bus width attached to a 8x PCIe system bus this function will report - * a max link width of 8. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkWidth Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth); - -/** - * Retrieves the current PCIe link generation - * - * For Fermi &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param currLinkGen Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen); - -/** - * Retrieves the current PCIe link width - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param currLinkWidth Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth); - -/** - * Retrieve PCIe utilization information. - * This function is querying a byte counter over a 20ms interval and thus is the - * PCIe throughput over that interval. - * - * For Maxwell &tm; or newer fully supported devices. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t - * @param value Reference in which to return throughput in KB/s - * - * @return - * - \ref NVML_SUCCESS if \a value has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value); - -/** - * Retrieve the PCIe replay counter. - * - * For Kepler &tm; or newer fully supported devices. 
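/*
 * Usage sketch (illustrative only): sampling the PCIe state and counters described above.
 * Throughput values are reported in KB/s over the 20ms measurement interval. The helper
 * name is hypothetical.
 */
#include <stdio.h>

static void printPcieState(nvmlDevice_t device)
{
    unsigned int gen = 0, width = 0, txKBs = 0, rxKBs = 0;

    if (nvmlDeviceGetCurrPcieLinkGeneration(device, &gen) == NVML_SUCCESS &&
        nvmlDeviceGetCurrPcieLinkWidth(device, &width) == NVML_SUCCESS)
        printf("PCIe link: gen %u x%u\n", gen, width);

    if (nvmlDeviceGetPcieThroughput(device, NVML_PCIE_UTIL_TX_BYTES, &txKBs) == NVML_SUCCESS &&
        nvmlDeviceGetPcieThroughput(device, NVML_PCIE_UTIL_RX_BYTES, &rxKBs) == NVML_SUCCESS)
        printf("PCIe throughput: TX %u KB/s, RX %u KB/s\n", txKBs, rxKBs);
}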
- * - * @param device The identifier of the target device - * @param value Reference in which to return the counter's value - * - * @return - * - \ref NVML_SUCCESS if \a value and \a rollover have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value or \a rollover are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); - -/** - * Retrieves the current clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the maximum clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks - * by few MHz. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. - * Can be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
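/*
 * Usage sketch (illustrative only): comparing current and maximum clocks for the clock
 * domains defined in nvmlClockType_t earlier in this header; values are in MHz. The helper
 * name is hypothetical.
 */
#include <stdio.h>

static void printClocks(nvmlDevice_t device)
{
    static const nvmlClockType_t domains[] = { NVML_CLOCK_GRAPHICS, NVML_CLOCK_SM,
                                               NVML_CLOCK_MEM, NVML_CLOCK_VIDEO };

    for (unsigned int i = 0; i < sizeof(domains) / sizeof(domains[0]); i++)
    {
        unsigned int current = 0, maximum = 0;

        if (nvmlDeviceGetClockInfo(device, domains[i], &current) == NVML_SUCCESS &&
            nvmlDeviceGetMaxClockInfo(device, domains[i], &maximum) == NVML_SUCCESS)
            printf("clock domain %d: %u MHz (max %u MHz)\n", (int)domains[i], current, maximum);
    }
}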
- * - * @param device                               The identifier of the target device - * @param clockType                            Identify which clock domain to query - * @param clockMHz                             Reference in which to return the clock in MHz - * - * @return - *         - \ref NVML_SUCCESS                 if \a clockMHz has been set - *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized - *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature - *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible - *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Retrieves the default applications clock that GPU boots with or - * defaults to after \ref nvmlDeviceResetApplicationsClocks call. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device                               The identifier of the target device - * @param clockType                            Identify which clock domain to query - * @param clockMHz                             Reference in which to return the default clock in MHz - * - * @return - *         - \ref NVML_SUCCESS                 if \a clockMHz has been set - *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized - *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature - *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible - *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error - * - * \see nvmlDeviceGetApplicationsClock - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Resets the application clock to the default value - * - * This is the applications clock that will be used after system reboot or driver reload. - * Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks, - * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above - * base clocks as thermal limits allow. - * - * @see nvmlDeviceGetApplicationsClock - * @see nvmlDeviceSetApplicationsClocks - * - * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * - * @param device                               The identifier of the target device - * - * @return - *         - \ref NVML_SUCCESS                 if new settings were successfully set - *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized - *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid - *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature - *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible - *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device); - -/** - * Retrieves the clock speed for the clock specified by the clock type and clock ID. - * - * For Kepler &tm; or newer fully supported devices.
- * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockId Identify which clock in the domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); - -/** - * Retrieves the customer defined maximum boost clock speed specified by the given clock type. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a clocksMHz array size, and - * to return the number of elements - * @param clocksMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of - * required elements) - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetApplicationsClocks - * @see nvmlDeviceGetSupportedGraphicsClocks - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz); - -/** - * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
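/*
 * Usage sketch (illustrative only): listing the memory clocks accepted by
 * nvmlDeviceSetApplicationsClocks(). A fixed 32-entry buffer is an arbitrary choice here;
 * per the description above, NVML_ERROR_INSUFFICIENT_SIZE means the buffer was too small
 * and count has been set to the required number of elements. The helper name is hypothetical.
 */
#include <stdio.h>

static void printSupportedMemoryClocks(nvmlDevice_t device)
{
    unsigned int clocksMHz[32];
    unsigned int count = sizeof(clocksMHz) / sizeof(clocksMHz[0]);
    nvmlReturn_t st    = nvmlDeviceGetSupportedMemoryClocks(device, &count, clocksMHz);

    if (st == NVML_ERROR_INSUFFICIENT_SIZE)
    {
        printf("buffer too small, %u entries required\n", count);
        return;
    }
    if (st != NVML_SUCCESS)
        return;

    for (unsigned int i = 0; i < count; i++)
        printf("supported memory clock: %u MHz\n", clocksMHz[i]);
}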
- *
- * @param device The identifier of the target device
- * @param memoryClockMHz Memory clock for which to return possible graphics clocks
- * @param count Reference in which to provide the \a clocksMHz array size, and
- * to return the number of elements
- * @param clocksMHz Reference in which to return the clocks in MHz
- *
- * @return
- *         - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- * @see nvmlDeviceSetApplicationsClocks
- * @see nvmlDeviceGetSupportedMemoryClocks
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz);
-
-/**
- * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
- * to maximize performance as thermal limits allow.
- *
- * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
- * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
- * behavior.
- *
- * @param device The identifier of the target device
- * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device
- * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will
- * revert to when no applications are using the GPU
- *
- * @return
- *         - \ref NVML_SUCCESS If \a isEnabled has been set with the Auto Boosted clocks state of \a device
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled);
-
-/**
- * Try to set the current state of Auto Boosted clocks on a device.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
- * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
- * rates are desired.
- *
- * Non-root users may use this API by default but can be restricted by root from using this API by calling
- * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
- * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled.
- * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set Auto Boosted clocks of the target device to - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); - -/** - * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will - * return to when no compute running processes (e.g. CUDA application which have an active context) are running - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. - * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set default Auto Boosted clocks of the target device to - * @param flags Flags that change the default behavior. Currently Unused. - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); - - -/** - * Retrieves the intended operating speed of the device's fan. - * - * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the - * output will not match the actual fan speed. - * - * For all discrete products with dedicated fans. - * - * The fan speed is expressed as a percent of the maximum, i.e. full speed is 100%. 
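- *
- * A minimal sketch, assuming a valid \a device handle and the NVML_TEMPERATURE_GPU enumerator
- * defined elsewhere in this header:
- * @code
- * unsigned int fanPct = 0, tempC = 0;
- * if (nvmlDeviceGetFanSpeed(device, &fanPct) == NVML_SUCCESS &&
- *     nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &tempC) == NVML_SUCCESS)
- *     printf("Fan: %u%%, GPU temperature: %u C\n", fanPct, tempC);
- * @endcode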
- * - * @param device The identifier of the target device - * @param speed Reference in which to return the fan speed percentage - * - * @return - * - \ref NVML_SUCCESS if \a speed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed); - -/** - * Retrieves the current temperature readings for the device, in degrees C. - * - * For all products. - * - * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. - * - * @param device The identifier of the target device - * @param sensorType Flag that indicates which sensor reading to retrieve - * @param temp Reference in which to return the temperature reading - * - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); - -/** - * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. - * - * For Kepler &tm; or newer fully supported devices. - * - * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. - * - * @param device The identifier of the target device - * @param thresholdType The type of threshold value queried - * @param temp Reference in which to return the temperature reading - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); - -/** - * Retrieves the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
- * - * @param device The identifier of the target device - * @param pState Reference in which to return the performance state reading - * - * @return - * - \ref NVML_SUCCESS if \a pState has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); - -/** - * Retrieves current clocks throttling reasons. - * - * For all fully supported products. - * - * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. - * - * @param device The identifier of the target device - * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle - * reasons - * - * @return - * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetSupportedClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); - -/** - * Retrieves bitmask of supported clocks throttle reasons that can be returned by - * \ref nvmlDeviceGetCurrentClocksThrottleReasons - * - * For all fully supported products. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported - * clocks throttle reasons - * - * @return - * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetCurrentClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); - -/** - * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. - * - * Retrieve the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
- *
- * @param device The identifier of the target device
- * @param pState Reference in which to return the performance state reading
- *
- * @return
- *         - \ref NVML_SUCCESS if \a pState has been set
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState);
-
-/**
- * This API has been deprecated.
- *
- * Retrieves the power management mode associated with this device.
- *
- * For products from the Fermi family.
- *     - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
- *
- * For products from the Kepler or newer families.
- *     - Does not require \a NVML_INFOROM_POWER object.
- *
- * This flag indicates whether any power management algorithm is currently active on the device. An
- * enabled state does not necessarily mean the device is being actively throttled -- only that
- * the driver will do so if the appropriate conditions are met.
- *
- * See \ref nvmlEnableState_t for details on allowed modes.
- *
- * @param device The identifier of the target device
- * @param mode Reference in which to return the current power management mode
- *
- * @return
- *         - \ref NVML_SUCCESS if \a mode has been set
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode);
-
-/**
- * Retrieves the power management limit associated with this device.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * The power limit defines the upper boundary for the card's power draw. If
- * the card's total power draw reaches this limit the power management algorithm kicks in.
- *
- * This reading is only available if power management mode is supported.
- * See \ref nvmlDeviceGetPowerManagementMode.
- *
- * @param device The identifier of the target device
- * @param limit Reference in which to return the power management limit in milliwatts
- *
- * @return
- *         - \ref NVML_SUCCESS if \a limit has been set
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit);
-
-/**
- * Retrieves information about possible values of power management limits on this device.
- *
- * For Kepler &tm; or newer fully supported devices.
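- *
- * A brief sketch combining this query with \ref nvmlDeviceGetPowerManagementLimit, assuming a
- * valid \a device handle:
- * @code
- * unsigned int minLimit = 0, maxLimit = 0, currentLimit = 0;
- * if (nvmlDeviceGetPowerManagementLimitConstraints(device, &minLimit, &maxLimit) == NVML_SUCCESS &&
- *     nvmlDeviceGetPowerManagementLimit(device, &currentLimit) == NVML_SUCCESS)
- *     printf("Power limit: %u mW (allowed range %u-%u mW)\n", currentLimit, minLimit, maxLimit);
- * @endcode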
- * - * @param device The identifier of the target device - * @param minLimit Reference in which to return the minimum power management limit in milliwatts - * @param maxLimit Reference in which to return the maximum power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPowerManagementLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); - -/** - * Retrieves default power management limit on this device, in milliwatts. - * Default power management limit is a power management limit that the device boots with. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param defaultLimit Reference in which to return the default power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a defaultLimit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); - -/** - * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) - * - * For Fermi &tm; or newer fully supported devices. - * - * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. - * - * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. - * - * @param device The identifier of the target device - * @param power Reference in which to return the power usage information - * - * @return - * - \ref NVML_SUCCESS if \a power has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); - -/** - * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded - * - * For newer than Pascal &tm; fully supported devices. 
- * - * @param device The identifier of the target device - * @param energy Reference in which to return the energy consumption information - * - * @return - * - \ref NVML_SUCCESS if \a energy has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); - -/** - * Get the effective power limit that the driver enforces after taking into account all limiters - * - * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere - * This includes the out of band power limit interface - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The device to communicate with - * @param limit Reference in which to return the power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); - -/** - * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * - * @param device The identifier of the target device - * @param current Reference in which to return the current GOM - * @param pending Reference in which to return the pending GOM - * - * @return - * - \ref NVML_SUCCESS if \a mode has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlGpuOperationMode_t - * @see nvmlDeviceSetGpuOperationMode - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); - -/** - * Retrieves the amount of used, free and total memory available on the device, in bytes. - * - * For all products. - * - * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. - * Under WDDM most device memory is allocated and managed on startup by Windows. - * - * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated - * by all active channels on the device. 
- * - * See \ref nvmlMemory_t for details on available memory info. - * - * @param device The identifier of the target device - * @param memory Reference in which to return the memory information - * - * @return - * - \ref NVML_SUCCESS if \a memory has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); - -/** - * Retrieves the current compute mode for the device. - * - * For all products. - * - * See \ref nvmlComputeMode_t for details on allowed compute modes. - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current compute mode - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetComputeMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode); - -/** - * Retrieves the CUDA compute capability of the device. - * - * For all products. - * - * Returns the major and minor compute capability version numbers of the - * device. The major and minor versions are equivalent to the - * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and - * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be - * returned by CUDA's cuDeviceGetAttribute(). - * - * @param device The identifier of the target device - * @param major Reference in which to return the major CUDA compute capability - * @param minor Reference in which to return the minor CUDA compute capability - * - * @return - * - \ref NVML_SUCCESS if \a major and \a minor have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); - -/** - * Retrieves the current and pending ECC modes for the device. - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * - * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following - * the next reboot. - * - * See \ref nvmlEnableState_t for details on allowed modes. 
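- *
- * A short sketch, assuming a valid \a device handle:
- * @code
- * nvmlEnableState_t current, pending;
- * if (nvmlDeviceGetEccMode(device, &current, &pending) == NVML_SUCCESS && current != pending)
- *     printf("ECC mode will change on the next reboot\n");
- * @endcode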
- * - * @param device The identifier of the target device - * @param current Reference in which to return the current ECC mode - * @param pending Reference in which to return the pending ECC mode - * - * @return - * - \ref NVML_SUCCESS if \a current and \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); - -/** - * Retrieves the device boardId from 0-N. - * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with - * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. - * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across - * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and - * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will - * always return those values but they will always be different from each other). - * - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param boardId Reference in which to return the device's board ID - * - * @return - * - \ref NVML_SUCCESS if \a boardId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); - -/** - * Retrieves whether the device is on a Multi-GPU Board - * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param multiGpuBool Reference in which to return a zero or non-zero value - * to indicate whether the device is on a multi GPU board - * - * @return - * - \ref NVML_SUCCESS if \a multiGpuBool has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); - -/** - * Retrieves the total ECC error counts for the device. - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires ECC Mode to be enabled. 
- * - * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of - * errors across the entire device. - * - * See \ref nvmlMemoryErrorType_t for a description of available error types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); - -/** - * Retrieves the detailed ECC error counts for the device. - * - * @deprecated This API supports only a fixed set of ECC error locations - * On different GPU architectures different locations are supported - * See \ref nvmlDeviceGetMemoryErrorCounter - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. - * Requires ECC Mode to be enabled. - * - * Detailed errors provide separate ECC counts for specific parts of the memory system. - * - * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. - * - * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types.\n - * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts); - -/** - * Retrieves the requested memory error counter for the device. - * - * For Fermi &tm; or newer fully supported devices. 
- * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
- * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
- *
- * Only applicable to devices with ECC.
- *
- * Requires ECC Mode to be enabled.
- *
- * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
- * See \ref nvmlEccCounterType_t for a description of available counter types.\n
- * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
- *
- * @param device The identifier of the target device
- * @param errorType Flag that specifies the type of error.
- * @param counterType Flag that specifies the counter-type of the errors.
- * @param locationType Specifies the location of the counter.
- * @param count Reference in which to return the ECC counter
- *
- * @return
- *         - \ref NVML_SUCCESS if \a count has been populated
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
- *         invalid, or \a count is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType,
-                                                     nvmlEccCounterType_t counterType,
-                                                     nvmlMemoryLocation_t locationType, unsigned long long *count);
-
-/**
- * Retrieves the current utilization rates for the device's major subsystems.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * See \ref nvmlUtilization_t for details on available utilization rates.
- *
- * \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
- *       This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization.
- *
- * @param device The identifier of the target device
- * @param utilization Reference in which to return the utilization information
- *
- * @return
- *         - \ref NVML_SUCCESS if \a utilization has been populated
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization);
-
-/**
- * Retrieves the current utilization and sampling size in microseconds for the Encoder
- *
- * For Kepler &tm; or newer fully supported devices.
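- *
- * A minimal sketch, assuming a valid \a device handle:
- * @code
- * unsigned int encUtil = 0, periodUs = 0;
- * if (nvmlDeviceGetEncoderUtilization(device, &encUtil, &periodUs) == NVML_SUCCESS)
- *     printf("Encoder utilization: %u%% over the last %u us\n", encUtil, periodUs);
- * @endcode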
- * - * @param device The identifier of the target device - * @param utilization Reference to an unsigned int for encoder utilization info - * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US - * - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); - -/** - * Retrieves the current capacity of the device's encoder, in macroblocks per second. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param encoderQueryType Type of encoder to query - * @param encoderCapacity Reference to an unsigned int for the encoder capacity - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType - * are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encodeQueryType - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity); - -/** - * Retrieves the current encoder statistics for a given device. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param sessionCount Reference to an unsigned int for count of active encoder sessions - * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions - * @param averageLatency Reference to an unsigned int for encode latency in microseconds - * - * @return - * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps, - * or \a averageLatency is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount, - unsigned int *averageFps, unsigned int *averageLatency); - -/** - * Retrieves information about active encoder sessions on a target device. - * - * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The - * array elememt count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions - * written to the buffer. 
- *
- * If the supplied buffer is not large enough to accommodate the active session array, the function returns
- * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
- * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
- * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
- *
- * For Maxwell &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
- * @param sessionInfos Reference in which to return the session information
- *
- * @return
- *         - \ref NVML_SUCCESS if \a sessionInfos is fetched
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos);
-
-/**
- * Retrieves the current utilization and sampling size in microseconds for the Decoder
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param utilization Reference to an unsigned int for decoder utilization info
- * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
- *
- * @return
- *         - \ref NVML_SUCCESS if \a utilization has been populated
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs);
-
-/**
- * Retrieves the current and pending driver model for the device.
- *
- * For Fermi &tm; or newer fully supported devices.
- * For Windows only.
- *
- * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
- * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached.
- *
- * See \ref nvmlDriverModel_t for details on available driver models.
- * - * @param device The identifier of the target device - * @param current Reference in which to return the current driver model - * @param pending Reference in which to return the pending driver model - * - * @return - * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); - -/** - * Get VBIOS version of the device. - * - * For all products. - * - * The VBIOS version may change from time to time. It will not exceed 32 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param version Reference to which to return the VBIOS version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Get Bridge Chip Information for all the bridge chips on the board. - * - * For all fully supported products. - * Only applicable to multi-GPU products. - * - * @param device The identifier of the target device - * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy - * - * @return - * - \ref NVML_SUCCESS if bridge chip exists - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); - -/** - * Get information about processes with a compute context on a device - * - * For Fermi &tm; or newer fully supported devices. - * - * This function returns information only about compute running processes (e.g. CUDA application which have - * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. - * - * To query the current number of running compute processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. 
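- *
- * A rough sketch of the two-call pattern described above; the \a pid and \a usedGpuMemory
- * members are taken from the nvmlProcessInfo_t definition elsewhere in this header:
- * @code
- * unsigned int infoCount = 0;
- * nvmlReturn_t result = nvmlDeviceGetComputeRunningProcesses(device, &infoCount, NULL);
- * if (result == NVML_ERROR_INSUFFICIENT_SIZE && infoCount > 0)
- * {
- *     nvmlProcessInfo_t *infos = (nvmlProcessInfo_t *)malloc(infoCount * sizeof(*infos));
- *     if (infos != NULL && nvmlDeviceGetComputeRunningProcesses(device, &infoCount, infos) == NVML_SUCCESS)
- *     {
- *         for (unsigned int i = 0; i < infoCount; ++i)
- *             printf("pid %u uses %llu bytes\n", infos[i].pid, infos[i].usedGpuMemory);
- *     }
- *     free(infos);
- * }
- * @endcode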
- * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new compute processes are spawned. - * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Get information about processes with a graphics context on a device - * - * For Kepler &tm; or newer fully supported devices. - * - * This function returns information only about graphics based processes - * (eg. applications using OpenGL, DirectX) - * - * To query the current number of running graphics processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. - * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new graphics processes are spawned. - * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Check if the GPU devices are on the same physical board. - * - * For all fully supported products. - * - * @param device1 The first GPU device - * @param device2 The second GPU device - * @param onSameBoard Reference in which to return the status. - * Non-zero indicates that the GPUs are on the same board. 
- * - * @return - * - \ref NVML_SUCCESS if \a onSameBoard has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the either GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard); - -/** - * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs. - * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions. - * - * For all fully supported products. - * - * @param device The identifier of the target device - * @param apiType Target API type for this operation - * @param isRestricted Reference in which to return the current restriction - * NVML_FEATURE_ENABLED indicates that the API is root-only - * NVML_FEATURE_DISABLED indicates that the API is accessible to all users - * - * @return - * - \ref NVML_SUCCESS if \a isRestricted has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support - * the feature that is being queried (E.G. Enabling/disabling Auto Boosted clocks is - * not supported by the device) - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlRestrictedAPI_t - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted); - -/** - * Gets recent samples for the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by - * the driver. - * - * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t. - * - * To get the size of samples that user needs to allocate, the method is invoked with samples set to NULL. - * The returned samplesCount will provide the number of samples that can be queried. The user needs to - * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t). - * - * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the - * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query - * to get more recent samples. - * - * This method fetches the number of entries which can be accommodated in the provided samples array, and the - * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this - * method for samples in contrast to polling via existing methods is to get get higher frequency data at lower polling cost. - * - * @param device The identifier for the target device - * @param type Type of sampling event - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
- * @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t
- * @param sampleCount Reference to provide the number of elements which can be queried in samples array
- * @param samples Reference in which samples are returned
- *
- * @return
- *         - \ref NVML_SUCCESS if samples are successfully retrieved
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or
- *         reference to \a sampleCount is 0 for non-null \a samples
- *         - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp,
-                                          nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples);
-
-/**
- * Gets Total, Available and Used size of BAR1 memory.
- *
- * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
- * devices (peer-to-peer on the PCIE bus).
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param bar1Memory Reference in which BAR1 memory
- * information is returned.
- *
- * @return
- *         - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved
- *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL
- *         - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
- *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory);
-
-
-/**
- * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power
- * or thermal constraints.
- *
- * The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The
- * difference in violation times at two different reference times gives an indication of a GPU throttling event.
- *
- * Violation for thermal capping is not supported at this time.
- *
- * For Kepler &tm; or newer fully supported devices.
- * - * @param device The identifier of the target device - * @param perfPolicyType Represents Performance policy which can trigger GPU throttling - * @param violTime Reference to which violation time related information is returned - * - * - * @return - * - \ref NVML_SUCCESS if violation time is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); - -/** - * @} - */ - -/** @addtogroup nvmlAccountingStats - * @{ - */ - -/** - * Queries the state of per process accounting mode. - * - * For Kepler &tm; or newer fully supported devices. - * - * See \ref nvmlDeviceGetAccountingStats for more details. - * See \ref nvmlDeviceSetAccountingMode - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current accounting mode - * - * @return - * - \ref NVML_SUCCESS if the mode has been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode); - -/** - * Queries process's accounting stats. - * - * For Kepler &tm; or newer fully supported devices. - * - * Accounting stats capture GPU utilization and other statistics across the lifetime of a process. - * Accounting stats can be queried during life time of the process and after its termination. - * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and - * updated to actual running time after its termination. - * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old - * processes. - * - * See \ref nvmlAccountingStats_t for description of each returned metric. - * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids. - * - * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode. - * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be - * queried since they don't contribute to GPU utilization. - * @note In case of pid collision stats of only the latest process (that terminated last) will be reported - * - * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU. 
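- *
- * A rough sketch combining this call with \ref nvmlDeviceGetAccountingPids; the fixed pid buffer
- * size is arbitrary and the \a gpuUtilization / \a maxMemoryUsage members are taken from the
- * nvmlAccountingStats_t definition elsewhere in this header:
- * @code
- * unsigned int pids[64];
- * unsigned int count = 64;
- * if (nvmlDeviceGetAccountingPids(device, &count, pids) == NVML_SUCCESS)
- * {
- *     nvmlAccountingStats_t stats;
- *     for (unsigned int i = 0; i < count; ++i)
- *         if (nvmlDeviceGetAccountingStats(device, pids[i], &stats) == NVML_SUCCESS)
- *             printf("pid %u: %u%% GPU, peak %llu bytes\n", pids[i], stats.gpuUtilization, stats.maxMemoryUsage);
- * }
- * @endcode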
- * - * @param device The identifier of the target device - * @param pid Process Id of the target process to query stats for - * @param stats Reference in which to return the process's accounting stats - * - * @return - * - \ref NVML_SUCCESS if stats have been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL - * - \ref NVML_ERROR_NOT_FOUND if process stats were not found - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); - -/** - * Queries list of processes that can be queried for accounting stats. The list of processes returned - * can be in running or terminated state. - * - * For Kepler &tm; or newer fully supported devices. - * - * To just query the number of processes ready to be queried, call this function with *count = 0 and - * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. - * - * For more details see \ref nvmlDeviceGetAccountingStats. - * - * @note In case of PID collision some processes might not be accessible before the circular buffer is full. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a pids array size, and - * to return the number of elements ready to be queried - * @param pids Reference in which to return list of process ids - * - * @return - * - \ref NVML_SUCCESS if pids were successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to - * expected value) - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); - -/** - * Returns the number of processes that the circular buffer with accounting pids can hold. - * - * For Kepler &tm; or newer fully supported devices. - * - * This is the maximum number of processes that accounting information will be stored for before information - * about oldest processes will get overwritten by information about new processes. - * - * @param device The identifier of the target device - * @param bufferSize Reference in which to provide the size (in number of elements) - * of the circular buffer for accounting stats. 
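[Editor's note, not part of the original header] A sketch combining nvmlDeviceGetAccountingPids and nvmlDeviceGetAccountingStats, declared above, using the size-query-then-fill pattern their documentation describes. It assumes accounting mode is already enabled and that nvmlAccountingStats_t carries gpuUtilization and maxMemoryUsage fields as defined earlier in this header.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Dump accounting stats for every PID the driver still tracks on this device. */
static void dumpAccountingStats(nvmlDevice_t device)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetAccountingPids(device, &count, NULL);
    if (ret != NVML_SUCCESS && ret != NVML_ERROR_INSUFFICIENT_SIZE)
        return;                               /* unsupported or accounting disabled */
    if (count == 0)
        return;                               /* nothing to report */
    unsigned int *pids = malloc(count * sizeof *pids);
    if (!pids || nvmlDeviceGetAccountingPids(device, &count, pids) != NVML_SUCCESS) {
        free(pids);
        return;
    }
    for (unsigned int i = 0; i < count; i++) {
        nvmlAccountingStats_t stats;
        if (nvmlDeviceGetAccountingStats(device, pids[i], &stats) == NVML_SUCCESS)
            printf("pid %u: gpuUtil=%u%% maxMem=%llu bytes\n",
                   pids[i], stats.gpuUtilization, stats.maxMemoryUsage);
    }
    free(pids);
}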
- * - * @return - * - \ref NVML_SUCCESS if buffer size was successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingStats - * @see nvmlDeviceGetAccountingPids - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); - -/** @} */ - -/** @addtogroup nvmlDeviceQueries - * @{ - */ - -/** - * Returns the list of retired pages by source, including pages that are pending retirement - * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param cause Filter page addresses by cause of retirement - * @param pageCount Reference in which to provide the \a addresses buffer size, and - * to return the number of retired pages that match \a cause - * Set to 0 to query the size without allocating an \a addresses buffer - * @param addresses Buffer to write the page addresses into - * - * @return - * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the - * matching page addresses. \a pageCount is set to the needed size. - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or - * \a addresses is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, - unsigned int *pageCount, unsigned long long *addresses); - -/** - * Check if any pages are pending retirement and need a reboot to fully retire. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param isPending Reference in which to return the pending status - * - * @return - * - \ref NVML_SUCCESS if \a isPending was populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlUnitCommands Unit Commands - * This chapter describes NVML operations that change the state of the unit. For S-class products. - * Each of these requires root/admin access. 
Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the LED state for the unit. The LED can be either green (0) or amber (1). - * - * For S-class products. - * Requires root/admin permissions. - * - * This operation takes effect immediately. - * - * - * Current S-Class products don't provide unique LEDs for each unit. As such, both front - * and back LEDs will be toggled in unison regardless of which unit is specified with this command. - * - * See \ref nvmlLedColor_t for available colors. - * - * @param unit The identifier of the target unit - * @param color The target LED color - * - * @return - * - \ref NVML_SUCCESS if the LED color has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitGetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceCommands Device Commands - * This chapter describes NVML operations that change the state of the device. - * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the persistence mode for the device. - * - * For all products. - * For Linux only. - * Requires root/admin permissions. - * - * The persistence mode determines whether the GPU driver software is torn down after the last client - * exits. - * - * This operation takes effect immediately. It is not persistent across reboots. After each reboot the - * persistence mode is reset to "Disabled". - * - * See \ref nvmlEnableState_t for available modes. - * - * @param device The identifier of the target device - * @param mode The target persistence mode - * - * @return - * - \ref NVML_SUCCESS if the persistence mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Set the compute mode for the device. - * - * For all products. - * Requires root/admin permissions. - * - * The compute mode determines whether a GPU can be used for compute operations and whether it can - * be shared across contexts. - * - * This operation takes effect immediately. Under Linux it is not persistent across reboots and - * always resets to "Default". 
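[Editor's note, not part of the original header] A short sketch for nvmlDeviceSetPersistenceMode, declared above; it must run as root and, as documented, the setting does not survive a reboot.

#include <stdio.h>
#include <nvml.h>

/* Enable persistence mode so the driver stays loaded after the last client exits. */
static void enablePersistence(nvmlDevice_t device)
{
    nvmlReturn_t ret = nvmlDeviceSetPersistenceMode(device, NVML_FEATURE_ENABLED);
    if (ret == NVML_ERROR_NO_PERMISSION)
        fprintf(stderr, "run as root to change persistence mode\n");
    else if (ret != NVML_SUCCESS)
        fprintf(stderr, "set persistence failed: %s\n", nvmlErrorString(ret));
}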
Under windows it is persistent. - * - * Under windows compute mode may only be set to DEFAULT when running in WDDM - * - * See \ref nvmlComputeMode_t for details on available compute modes. - * - * @param device The identifier of the target device - * @param mode The target compute mode - * - * @return - * - \ref NVML_SUCCESS if the compute mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetComputeMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); - -/** - * Set the ECC mode for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires root/admin permissions. - * - * The ECC mode determines whether the GPU enables its ECC support. - * - * This operation takes effect after the next reboot. - * - * See \ref nvmlEnableState_t for details on available modes. - * - * @param device The identifier of the target device - * @param ecc The target ECC mode - * - * @return - * - \ref NVML_SUCCESS if the ECC mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); - -/** - * Clear the ECC error and other memory error counts for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. - * Requires root/admin permissions. - * Requires ECC Mode to be enabled. - * - * Sets all of the specified ECC counters to 0, including both detailed and total counts. - * - * This operation takes effect immediately. - * - * See \ref nvmlMemoryErrorType_t for details on available counter types. - * - * @param device The identifier of the target device - * @param counterType Flag that indicates which type of errors should be cleared. 
- * - * @return - * - \ref NVML_SUCCESS if the error counts were cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see - * - nvmlDeviceGetDetailedEccErrors() - * - nvmlDeviceGetTotalEccErrors() - */ -nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType); - -/** - * Set the driver model for the device. - * - * For Fermi &tm; or newer fully supported devices. - * For windows only. - * Requires root/admin permissions. - * - * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached - * to the device it must run in WDDM mode. - * - * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce). - * This should only be done if the host is subsequently powered down and the display is detached from the device - * before the next reboot. - * - * This operation takes effect after the next reboot. - * - * Windows driver model may only be set to WDDM when running in DEFAULT compute mode. - * - * Change driver model to WDDM is not supported when GPU doesn't support graphics acceleration or - * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode. - * - * See \ref nvmlDriverModel_t for details on available driver models. - * See \ref nvmlFlagDefault and \ref nvmlFlagForce - * - * @param device The identifier of the target device - * @param driverModel The target driver model - * @param flags Flags that change the default behavior - * - * @return - * - \ref NVML_SUCCESS if the driver model has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags); - -/** - * Set clocks that applications will lock to. - * - * Sets the clocks that compute and graphics applications will be running at. - * e.g. CUDA driver requests these clocks during context creation which means this property - * defines clocks at which CUDA applications will be running unless some overspec event - * occurs (e.g. over power, over thermal or external HW brake). - * - * Can be used as a setting to request constant performance. - * - * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. 
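[Editor's note, not part of the original header] A short sketch for nvmlDeviceClearEccErrorCounts, declared above, clearing the volatile (since-boot) counters; NVML_VOLATILE_ECC is the nvmlEccCounterType_t value defined earlier in this header. Requires root and ECC mode enabled.

#include <stdio.h>
#include <nvml.h>

/* Reset the volatile ECC error counters on one device. */
static void clearVolatileEccCounts(nvmlDevice_t device)
{
    nvmlReturn_t ret = nvmlDeviceClearEccErrorCounts(device, NVML_VOLATILE_ECC);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "clearing ECC counters failed: %s\n", nvmlErrorString(ret));
}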
- * - * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call - * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting - * above the clock value being set. - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks - * for details on how to list available clocks combinations. - * - * After system reboot or driver reload applications clocks go back to their default value. - * See \ref nvmlDeviceResetApplicationsClocks. - * - * @param device The identifier of the target device - * @param memClockMHz Requested memory clock in MHz - * @param graphicsClockMHz Requested graphics clock in MHz - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz - * is not a valid clock combination - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); - -/** - * Set new power limit of this device. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. - * - * \note Limit is not persistent across reboots or driver unloads. - * Enable persistent mode to prevent driver from unloading when no application is using the device. - * - * @param device The identifier of the target device - * @param limit Power management limit in milliwatts to set - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPowerManagementLimitConstraints - * @see nvmlDeviceGetPowerManagementDefaultLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); - -/** - * Sets new GOM. See \a nvmlGpuOperationMode_t for details. - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * Requires root/admin permissions. - * - * Changing GOMs requires a reboot. - * The reboot requirement might be removed in the future. - * - * Compute only GOMs don't support graphics acceleration. Under windows switching to these GOMs when - * pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel. 
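[Editor's note, not part of the original header] A sketch for nvmlDeviceSetPowerManagementLimit, declared above, clamping the requested cap to the range reported by nvmlDeviceGetPowerManagementLimitConstraints (referenced above, declared elsewhere in this header).

#include <stdio.h>
#include <nvml.h>

/* Clamp a requested power cap (in milliwatts) to the device's allowed range,
 * then apply it. Requires root; the limit does not persist across reboots. */
static void setPowerCap(nvmlDevice_t device, unsigned int requestedMw)
{
    unsigned int minMw, maxMw;
    if (nvmlDeviceGetPowerManagementLimitConstraints(device, &minMw, &maxMw) != NVML_SUCCESS)
        return;
    if (requestedMw < minMw) requestedMw = minMw;
    if (requestedMw > maxMw) requestedMw = maxMw;
    nvmlReturn_t ret = nvmlDeviceSetPowerManagementLimit(device, requestedMw);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "set power limit failed: %s\n", nvmlErrorString(ret));
}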
- *
- * @param device                               The identifier of the target device
- * @param mode                                 Target GOM
- *
- * @return
- *         - \ref NVML_SUCCESS                 if \a mode has been set
- *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a mode incorrect
- *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support GOM or specific mode
- *         - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
- *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
- *
- * @see nvmlGpuOperationMode_t
- * @see nvmlDeviceGetGpuOperationMode
- */
-nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode);
-
-/**
- * Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs.
- * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs.
- * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction
- * to query the current restriction settings.
- *
- * For Kepler &tm; or newer fully supported devices.
- * Requires root/admin permissions.
- *
- * @param device                               The identifier of the target device
- * @param apiType                              Target API type for this operation
- * @param isRestricted                         The target restriction
- *
- * @return
- *         - \ref NVML_SUCCESS                 if \a isRestricted has been set
- *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
- *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a apiType incorrect
- *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support changing API restrictions or the device does not support
- *                                             the feature that API restrictions are being set for (e.g. enabling/disabling
- *                                             auto boosted clocks is not supported by the device)
- *         - \ref NVML_ERROR_NO_PERMISSION     if the user doesn't have permission to perform this operation
- *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
- *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
- *
- * @see nvmlRestrictedAPI_t
- */
-nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted);
-
-/**
- * @}
- */
-
-/** @addtogroup nvmlAccountingStats
- *  @{
- */
-
-/**
- * Enables or disables per process accounting.
- *
- * For Kepler &tm; or newer fully supported devices.
- * Requires root/admin permissions.
- *
- * @note This setting is not persistent and will default to disabled after driver unloads.
- *       Enable persistence mode to be sure the setting doesn't switch off to disabled.
- *
- * @note Enabling accounting mode has no negative impact on the GPU performance.
- *
- * @note Disabling accounting clears all accounting pids information.
- * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceClearAccountingPids - * - * @param device The identifier of the target device - * @param mode The target accounting mode - * - * @return - * - \ref NVML_SUCCESS if the new mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Clears accounting information about all processes that have already terminated. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceSetAccountingMode - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if accounting information has been cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup NvLink NvLink Methods - * This chapter describes methods that NVML can perform on NVLINK enabled devices. - * @{ - */ -/***************************************************************************************************/ - -/** - * Retrieves the state of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that - * the link is active and NVML_FEATURE_DISABLED indicates it - * is inactive - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); - -/** - * Retrieves the version of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. 
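[Editor's note, not part of the original header] A sketch for nvmlDeviceGetNvLinkState, declared above, walking all links of one device. It assumes the NVML_NVLINK_MAX_LINKS per-device link count defined earlier in this header.

#include <stdio.h>
#include <nvml.h>

/* Report which NvLink links are active on a Pascal-or-newer device. */
static void listActiveNvLinks(nvmlDevice_t device)
{
    for (unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; link++) {
        nvmlEnableState_t isActive;
        if (nvmlDeviceGetNvLinkState(device, link, &isActive) != NVML_SUCCESS)
            continue;   /* link not present or not supported */
        printf("link %u: %s\n", link,
               isActive == NVML_FEATURE_ENABLED ? "active" : "inactive");
    }
}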
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param version Requested NvLink version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); - -/** - * Retrieves the requested capability from the device's NvLink for the link specified - * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried - * The return value should be treated as a boolean. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried - * @param capResult A boolean for the queried capability indicating that feature is available - * - * @return - * - \ref NVML_SUCCESS if \a capResult has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, - nvmlNvLinkCapability_t capability, unsigned int *capResult); - -/** - * Retrieves the PCI information for the remote node on a NvLink link - * Note: pciSubSystemId is not filled in this function and is indeterminate - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param pci \a nvmlPciInfo_t of the remote node for the specified link - * - * @return - * - \ref NVML_SUCCESS if \a pci has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); - -/** - * Retrieves the specified error counter value - * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available - * - * For Pascal &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the NvLink counter to be queried - * @param counterValue Returned counter value - * - * @return - * - \ref NVML_SUCCESS if \a counter has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, - nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); - -/** - * Resets all error counters to zero - * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * - * @return - * - \ref NVML_SUCCESS if the reset is successful - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); - -/** - * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset - * of the counters if the reset parameter is non-zero. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). - * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set - * @param reset Resets the counters on set if non-zero - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control, unsigned int reset); - -/** - * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). 
- * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control); - - -/** - * Retrieve the NVLINK utilization counter based on the current control for a specified counter. - * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl - * before reading the utilization counters as they have no default state - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be read (0 or 1). - * @param rxcounter Receive counter return value - * @param txcounter Transmit counter return value - * - * @return - * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, - unsigned long long *rxcounter, unsigned long long *txcounter); - -/** - * Freeze the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be frozen (0 or 1). - * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters - * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters - * - * @return - * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, - unsigned int counter, nvmlEnableState_t freeze); - -/** - * Reset the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. 
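[Editor's note, not part of the original header] A sketch for nvmlDeviceGetNvLinkUtilizationCounter, declared above. As the documentation notes, the counters have no default state, so this assumes the counter control was already configured elsewhere with nvmlDeviceSetNvLinkUtilizationControl.

#include <stdio.h>
#include <nvml.h>

/* Read NvLink utilization counter 0 for one link of a device. */
static void readNvLinkUtilization(nvmlDevice_t device, unsigned int link)
{
    unsigned long long rx = 0, tx = 0;
    if (nvmlDeviceGetNvLinkUtilizationCounter(device, link, 0, &rx, &tx) == NVML_SUCCESS)
        printf("link %u counter 0: rx=%llu tx=%llu\n", link, rx, tx);
}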
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be reset - * @param counter Specifies the counter that should be reset (0 or 1) - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlEvents Event Handling Methods - * This chapter describes methods that NVML can perform against each device to register and wait for - * some event to occur. - * @{ - */ -/***************************************************************************************************/ - -/** - * Create an empty set of events. - * Event set should be freed by \ref nvmlEventSetFree - * - * For Fermi &tm; or newer fully supported devices. - * @param set Reference in which to return the event handle - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); - -/** - * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t - * - * For Fermi &tm; or newer fully supported devices. - * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) - * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) - * - * For Linux only. - * - * \b IMPORTANT: Operations on \a set are not thread safe - * - * This call starts recording of events on specific device. - * All events that occurred before this call are not recorded. - * Checking if some event occurred can be done with \ref nvmlEventSetWait - * - * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. - * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes - * are registered in that case. 
- * - * @param device The identifier of the target device - * @param eventTypes Bitmask of \ref nvmlEventType to record - * @param set Set to which add new event types - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceGetSupportedEventTypes - * @see nvmlEventSetWait - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); - -/** - * Returns information about events supported on device - * - * For Fermi &tm; or newer fully supported devices. - * - * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. - * - * @param device The identifier of the target device - * @param eventTypes Reference in which to return bitmask of supported events - * - * @return - * - \ref NVML_SUCCESS if the eventTypes has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); - -/** - * Waits on events and delivers events - * - * For Fermi &tm; or newer fully supported devices. - * - * If some events are ready to be delivered at the time of the call, function returns immediately. - * If there are no events ready to be delivered, function sleeps till event arrives - * but not longer than specified timeout. This function in certain conditions can return before - * specified timeout passes (e.g. when interrupt arrives) - * - * In case of xid error, the function returns the most recent xid error type seen by the system. If there are multiple - * xid errors generated before nvmlEventSetWait is invoked then the last seen xid error type is returned for all - * xid error events. - * - * @param set Reference to set of events to wait on - * @param data Reference in which to return event data - * @param timeoutms Maximum amount of wait time in milliseconds for registered event - * - * @return - * - \ref NVML_SUCCESS if the data has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL - * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived - * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); - -/** - * Releases events in the set - * - * For Fermi &tm; or newer fully supported devices. 
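[Editor's note, not part of the original header] A sketch of the event flow described above (Linux only): create a set, register for Xid errors, wait with a timeout, then free the set. It assumes the nvmlEventTypeXidCriticalError mask and the eventType/eventData members of nvmlEventData_t defined earlier in this header.

#include <stdio.h>
#include <nvml.h>

/* Block for up to 10 seconds waiting for an Xid error event on one device. */
static void watchForXid(nvmlDevice_t device)
{
    nvmlEventSet_t set;
    if (nvmlEventSetCreate(&set) != NVML_SUCCESS)
        return;
    if (nvmlDeviceRegisterEvents(device, nvmlEventTypeXidCriticalError, set) == NVML_SUCCESS) {
        nvmlEventData_t data;
        nvmlReturn_t ret = nvmlEventSetWait(set, &data, 10000 /* ms */);
        if (ret == NVML_SUCCESS)
            printf("got event type 0x%llx (xid %llu)\n", data.eventType, data.eventData);
        else if (ret == NVML_ERROR_TIMEOUT)
            printf("no event within timeout\n");
    }
    nvmlEventSetFree(set);
}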
- * - * @param set Reference to events to be released - * - * @return - * - \ref NVML_SUCCESS if the event has been successfully released - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlZPI Drain states - * This chapter describes methods that NVML can perform against each device to control their drain state - * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to - * power on/off GPUs, enable robust reset scenarios, etc. - * @{ - */ -/***************************************************************************************************/ - -/** - * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. - * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before - * this call is made. - * Must be called as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be modified - * @param newState The drain state that should be entered, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); - -/** - * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining - * state. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be queried - * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); - -/** - * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver - * as long as no other processes are attached. If other processes are attached, this call will return - * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. 
Note: the - * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called - * to initiate the draining state is if that process was using, and is still using, a GPU before the - * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled - * prior to this call. - * - * For long-running NVML processes please note that this will change the enumeration of current GPUs. - * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. - * Also, device handles after the removed GPU will not be valid and must be re-established. - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU to be removed - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed - */ -nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu (nvmlPciInfo_t *pciInfo); - -/** - * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that - * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. - * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes - * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. - * - * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds - * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. - * - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device - * fields are used in this call. - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature - * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlFieldValueQueries Field Value Queries - * This chapter describes NVML operations that are associated with retrieving Field Values from NVML - * @{ - */ -/***************************************************************************************************/ - -/** - * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. 
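[Editor's note, not part of the original header] A sketch of the drain-and-remove sequence described above (Linux, root, persistence mode off). It assumes nvmlDeviceGetPciInfo from earlier in this header to obtain the nvmlPciInfo_t that the drain-state APIs operate on.

#include <stdio.h>
#include <nvml.h>

/* Drain one GPU, detach it from NVML and the kernel driver, then ask the
 * driver to rediscover the same PCI location later. */
static void drainAndRemove(nvmlDevice_t device)
{
    nvmlPciInfo_t pci;
    if (nvmlDeviceGetPciInfo(device, &pci) != NVML_SUCCESS)
        return;
    if (nvmlDeviceModifyDrainState(&pci, NVML_FEATURE_ENABLED) != NVML_SUCCESS)
        return;
    nvmlReturn_t ret = nvmlDeviceRemoveGpu(&pci);
    if (ret == NVML_ERROR_IN_USE)
        fprintf(stderr, "GPU still has attached processes; left in draining state\n");
    /* ... later, rescan that portion of the PCI tree for the device ... */
    nvmlDeviceDiscoverGpus(&pci);
}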
- * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs - * will be populated from a single call rather than making a driver call for each fieldId. - * - * @param device The device handle of the GPU to request field values for - * @param valuesCount Number of entries in values that should be retrieved - * @param values Array of \a valuesCount structures to hold field values. - * Each value's fieldId must be populated prior to this call - * - * @return - * - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must - * check the nvmlReturn field of each value for each individual - * status - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); - - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridQueries Grid Queries - * This chapter describes NVML operations that are associated with NVIDIA GRID products. - * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to get the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridCommands Grid Commands - * This chapter describes NVML operations that are associated with NVIDIA GRID products. - * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to set the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. - * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. 
- */ -nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpu vGPU Management - * @{ - * - * Set of APIs supporting GRID vGPU - */ -/***************************************************************************************************/ - -/** - * Retrieve the supported vGPU types on a physical GPU (device). - * - * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the currently creatable vGPU types on a physical GPU (device). - * - * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. - * - * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types - * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable - * list will be restricted to whatever vGPU type is already running on the device. - * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types createable for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. 
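[Editor's note, not part of the original header] A sketch for nvmlDeviceGetSupportedVgpus, declared above, using the size-query-then-fill pattern its documentation describes, together with nvmlVgpuTypeGetName declared further below. It assumes the NVML_DEVICE_NAME_BUFFER_SIZE constant referenced in those docs.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Enumerate the vGPU types a physical GPU supports and print their names. */
static void listSupportedVgpuTypes(nvmlDevice_t device)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetSupportedVgpus(device, &count, NULL);
    if ((ret != NVML_SUCCESS && ret != NVML_ERROR_INSUFFICIENT_SIZE) || count == 0)
        return;
    nvmlVgpuTypeId_t *ids = malloc(count * sizeof *ids);
    if (!ids || nvmlDeviceGetSupportedVgpus(device, &count, ids) != NVML_SUCCESS) {
        free(ids);
        return;
    }
    for (unsigned int i = 0; i < count; i++) {
        char name[NVML_DEVICE_NAME_BUFFER_SIZE];
        unsigned int size = sizeof name;
        if (nvmlVgpuTypeGetName(ids[i], name, &size) == NVML_SUCCESS)
            printf("vGPU type %u: %s\n", ids[i], name);
    }
    free(ids);
}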
- * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeClass Pointer to string array to return class in - * @param size Size of string - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); - -/** - * Retrieve the vGPU type name. - * - * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not - * exceed 64 characters in length (including the NUL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeName Pointer to buffer to return name - * @param size Size of buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); - -/** - * Retrieve the device ID of a vGPU type. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value - * @param subsystemID Subsytem ID and subsytem vendor ID of the device contained in single 32 bit value - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); - -/** - * Retrieve the vGPU framebuffer size in bytes. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuTypeId Handle to vGPU type - * @param fbSize Pointer to framebuffer size in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize); - -/** - * Retrieve count of vGPU's supported display heads. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param numDisplayHeads Pointer to number of display heads - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads); - -/** - * Retrieve vGPU display head's maximum supported resolution. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param displayIndex Zero-based index of display head - * @param xdim Pointer to maximum number of pixels in X dimension - * @param ydim Pointer to maximum number of pixels in Y dimension - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex - * is out of range. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim); - -/** - * Retrieve license requirements for a vGPU type - * - * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form - * ",", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license, - * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0". - * - * The total length of the returned string will not exceed 128 characters, including the NUL terminator. - * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeLicenseString Pointer to buffer to return license info - * @param size Size of \a vgpuTypeLicenseString buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size); - -/** - * Retrieve the static frame rate limit value of the vGPU type - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuTypeId Handle to vGPU type - * @param frameRateLimit Reference to return the frame rate limit value - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit); - -/** - * Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param vgpuTypeId Handle to vGPU type - * @param vgpuInstanceCount Pointer to get the max number of vGPU instances - * that can be created on a deicve for given vgpuTypeId - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device, - * or \a vgpuInstanceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount); - -/** - * Retrieve the active vGPU instances on a device. - * - * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The - * array elememt count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances - * written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the vGPU instance array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. - * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return - * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer which passes in the array size as well as get - * back the number of types - * @param vgpuInstances Pointer to array in which to return list of vGPU instances - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); - -/** - * Retrieve the VM ID associated with a vGPU instance. - * - * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vmId Pointer to caller-supplied buffer to hold VM ID - * @param size Size of buffer in bytes - * @param vmIdType Pointer to hold VM ID type - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vmId or \a vmIdType are NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); - -/** - * Retrieve the UUID of a vGPU instance. - * - * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string, - * not exceeding 80 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param uuid Pointer to caller-supplied buffer to hold vGPU UUID - * @param size Size of buffer in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a uuid is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size); - -/** - * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU. - * - * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version - * string will not exceed 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. - * - * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is - * returned as "Unknown" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the - * NVIDIA driver is loaded and initialized. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param version Caller-supplied buffer to return driver version string - * @param length Size of \a version buffer - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length); - -/** - * Retrieve the framebuffer usage in bytes. - * - * Framebuffer usage is the amont of vGPU framebuffer memory that is currently in use by the VM. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuInstance The identifier of the target instance - * @param fbUsage Pointer to framebuffer usage in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a fbUsage is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage); - -/** - * Retrieve the current licensing state of the vGPU instance. - * - * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param licensed Reference to return the licensing status - * - * @return - * - \ref NVML_SUCCESS if \a licensed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a licensed is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed); - -/** - * Retrieve the vGPU type of a vGPU instance. - * - * Returns the vGPU type ID of vgpu assigned to the vGPU instance. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vgpuTypeId Reference to return the vgpuTypeId - * - * @return - * - \ref NVML_SUCCESS if \a vgpuTypeId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vgpuTypeId is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId); - -/** - * Retrieve the frame rate limit set for the vGPU instance. - * - * Returns the value of the frame rate limit set for the vGPU instance - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param frameRateLimit Reference to return the frame rate limit - * - * @return - * - \ref NVML_SUCCESS if \a frameRateLimit has been set - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit); - -/** - * Retrieve the encoder Capacity of a vGPU instance, in macroblocks per second. - * - * For Maxwell &tm; or newer fully supported devices. 
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Reference to an unsigned int for the encoder capacity - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been retrived - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a encoderQueryType is invalid - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity); - -/** - * Set the encoder Capacity of a vGPU instance, in macroblocks per second. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Unsigned int for the encoder capacity value - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity); - -/** - * Retrieves current utilization for vGPUs on a physical GPU (device). - * - * For Kepler &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running - * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer - * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the - * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values - * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to - * indicate the returned value type. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate - * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample - * structures that were actually written. This may differ from a previously read value as vGPU instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
- * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values - * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is - * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, - nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); - -/** - * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on - * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the - * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running - * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which - * the samples were recorded. Individual utilization values are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size - * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample - * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active - * in any given sample period. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
- * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is - * passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - unsigned int *vgpuProcessSamplesCount, - nvmlVgpuProcessUtilizationSample_t *utilizationSamples); -/** - * Retrieve the GRID licensable features. - * - * Identifies whether the system supports GRID Software Licensing. If it does, return the list of licensable feature(s) - * and their current license status. - * - * @param device Identifier of the target device - * @param pGridLicensableFeatures Pointer to structure in which GRID licensable features are returned - * - * @return - * - \ref NVML_SUCCESS if licensable features are successfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); - -/** - * Retrieves the current encoder statistics of a vGPU Instance - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to an unsigned int for count of active encoder sessions - * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions - * @param averageLatency Reference to an unsigned int for encode latency in microseconds - * - * @return - * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount , or \a averageFps or \a averageLatency is NULL - * or \a vgpuInstance is invalid. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, - unsigned int *averageFps, unsigned int *averageLatency); - -/** - * Retrieves information about all active encoder sessions on a vGPU Instance. - * - * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. 
The - * array elememt count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions - * written to the buffer. - * - * If the supplied buffer is not large enough to accomodate the active session array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. - * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return - * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to caller supplied array size, and returns - * the number of sessions. - * @param sessionInfo Reference to caller supplied array in which the list - * of session information us returned. - * - * @return - * - \ref NVML_SUCCESS if \a sessionInfo is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is - returned in \a sessionCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL or \a vgpuInstance is invalid.. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo); - -/** - * Retrieves the current utilization and process ID - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. - * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at - * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization - * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values - * are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilization set to NULL. The caller should allocate a buffer of size - * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed - * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. - * - * On successful return, the function updates \a processSamplesCount with the number of process utilization sample - * structures that were actually written. This may differ from a previously read value as instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
- * - * @param device The identifier of the target device - * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned - * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, - unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); - -/** @} */ - -/** - * NVML API versioning support - */ -#if defined(__NVML_API_VERSION_INTERNAL) -#undef nvmlDeviceGetPciInfo -#undef nvmlDeviceGetCount -#undef nvmlDeviceGetHandleByIndex -#undef nvmlDeviceGetHandleByPciBusId -#undef nvmlInit -#endif - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/.golangci.yml b/cluster-autoscaler/vendor/github.com/moby/ipvs/.golangci.yml new file mode 100644 index 000000000000..22e099215b38 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/.golangci.yml @@ -0,0 +1,8 @@ +linters: + disable-all: true + enable: + - gofmt + - govet + - ineffassign + - misspell + - revive diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/LICENSE b/cluster-autoscaler/vendor/github.com/moby/ipvs/LICENSE index e06d2081865a..d64569567334 100644 --- a/cluster-autoscaler/vendor/github.com/moby/ipvs/LICENSE +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/LICENSE @@ -1,4 +1,5 @@ -Apache License + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -199,4 +200,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
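The NVML declarations removed above all follow the same size-negotiation protocol: call the function with *count set to 0 (or a NULL buffer), receive NVML_ERROR_INSUFFICIENT_SIZE together with the required element count, allocate, and call again. A minimal Go sketch of that two-call pattern is below; `query` is a hypothetical stand-in for calls such as nvmlDeviceGetSupportedVgpus, not a real NVML binding.

```go
package main

import (
	"errors"
	"fmt"
)

// errInsufficientSize mirrors NVML_ERROR_INSUFFICIENT_SIZE for this sketch.
var errInsufficientSize = errors.New("insufficient size")

// query is a hypothetical stand-in for an NVML-style call: it always reports
// how many elements exist and only fills buf when buf is large enough.
func query(buf []uint32) (int, error) {
	ids := []uint32{11, 18, 42} // pretend these are vGPU type IDs
	if len(buf) < len(ids) {
		return len(ids), errInsufficientSize
	}
	copy(buf, ids)
	return len(ids), nil
}

func main() {
	// First call with no buffer: learn the required element count.
	count, err := query(nil)
	if err != nil && !errors.Is(err, errInsufficientSize) {
		panic(err)
	}
	// Second call with a buffer sized from the first answer.
	ids := make([]uint32, count)
	if _, err := query(ids); err != nil {
		panic(err)
	}
	fmt.Println(ids) // [11 18 42]
}
```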
- diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/README.md b/cluster-autoscaler/vendor/github.com/moby/ipvs/README.md index a45cf049a559..2f6d6c61cd2d 100644 --- a/cluster-autoscaler/vendor/github.com/moby/ipvs/README.md +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/README.md @@ -31,4 +31,5 @@ func main() { Want to hack on ipvs? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. ## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. + +Copyright 2015 Docker, inc. Code released under the [Apache 2.0 license](LICENSE). diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/constants.go b/cluster-autoscaler/vendor/github.com/moby/ipvs/constants_linux.go similarity index 93% rename from cluster-autoscaler/vendor/github.com/moby/ipvs/constants.go rename to cluster-autoscaler/vendor/github.com/moby/ipvs/constants_linux.go index efac4e367cf8..0e0561d79f13 100644 --- a/cluster-autoscaler/vendor/github.com/moby/ipvs/constants.go +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/constants_linux.go @@ -1,5 +1,3 @@ -// +build linux - package ipvs const ( @@ -88,20 +86,20 @@ const ( ipvsDestAttrAddressFamily ) -// IPVS Svc Statistics constancs +// IPVS Statistics constants const ( - ipvsSvcStatsUnspec int = iota - ipvsSvcStatsConns - ipvsSvcStatsPktsIn - ipvsSvcStatsPktsOut - ipvsSvcStatsBytesIn - ipvsSvcStatsBytesOut - ipvsSvcStatsCPS - ipvsSvcStatsPPSIn - ipvsSvcStatsPPSOut - ipvsSvcStatsBPSIn - ipvsSvcStatsBPSOut + ipvsStatsUnspec int = iota + ipvsStatsConns + ipvsStatsPktsIn + ipvsStatsPktsOut + ipvsStatsBytesIn + ipvsStatsBytesOut + ipvsStatsCPS + ipvsStatsPPSIn + ipvsStatsPPSOut + ipvsStatsBPSIn + ipvsStatsBPSOut ) // Destination forwarding methods diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs.go b/cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs_linux.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs.go rename to cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs_linux.go index 61b6f0a5e4e5..686e74658959 100644 --- a/cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs.go +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/ipvs_linux.go @@ -1,5 +1,3 @@ -// +build linux - package ipvs import ( @@ -181,7 +179,6 @@ func (i *Handle) GetDestinations(s *Service) ([]*Destination, error) { // GetService gets details of a specific IPVS services, useful in updating statisics etc., func (i *Handle) GetService(s *Service) (*Service, error) { - res, err := i.doGetServicesCmd(s) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/moby/ipvs/netlink.go b/cluster-autoscaler/vendor/github.com/moby/ipvs/netlink_linux.go similarity index 93% rename from cluster-autoscaler/vendor/github.com/moby/ipvs/netlink.go rename to cluster-autoscaler/vendor/github.com/moby/ipvs/netlink_linux.go index ac48d05a31d7..711d8051eab0 100644 --- a/cluster-autoscaler/vendor/github.com/moby/ipvs/netlink.go +++ b/cluster-autoscaler/vendor/github.com/moby/ipvs/netlink_linux.go @@ -1,5 +1,3 @@ -// +build linux - package ipvs import ( @@ -124,8 +122,8 @@ func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]b req.Seq = atomic.AddUint32(&i.seq, 1) if s == nil { - 
req.Flags |= syscall.NLM_F_DUMP //Flag to dump all messages - req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) //Add a dummy attribute + req.Flags |= syscall.NLM_F_DUMP // Flag to dump all messages + req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) // Add a dummy attribute } else { req.AddData(fillService(s)) } @@ -134,7 +132,6 @@ func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]b if cmd == ipvsCmdGetDest { req.Flags |= syscall.NLM_F_DUMP } - } else { req.AddData(fillDestination(d)) } @@ -259,7 +256,6 @@ done: } func parseIP(ip []byte, family uint16) (net.IP, error) { - var resIP net.IP switch family { @@ -276,7 +272,6 @@ func parseIP(ip []byte, family uint16) (net.IP, error) { // parseStats func assembleStats(msg []byte) (SvcStats, error) { - var s SvcStats attrs, err := nl.ParseRouteAttr(msg) @@ -287,25 +282,25 @@ func assembleStats(msg []byte) (SvcStats, error) { for _, attr := range attrs { attrType := int(attr.Attr.Type) switch attrType { - case ipvsSvcStatsConns: + case ipvsStatsConns: s.Connections = native.Uint32(attr.Value) - case ipvsSvcStatsPktsIn: + case ipvsStatsPktsIn: s.PacketsIn = native.Uint32(attr.Value) - case ipvsSvcStatsPktsOut: + case ipvsStatsPktsOut: s.PacketsOut = native.Uint32(attr.Value) - case ipvsSvcStatsBytesIn: + case ipvsStatsBytesIn: s.BytesIn = native.Uint64(attr.Value) - case ipvsSvcStatsBytesOut: + case ipvsStatsBytesOut: s.BytesOut = native.Uint64(attr.Value) - case ipvsSvcStatsCPS: + case ipvsStatsCPS: s.CPS = native.Uint32(attr.Value) - case ipvsSvcStatsPPSIn: + case ipvsStatsPPSIn: s.PPSIn = native.Uint32(attr.Value) - case ipvsSvcStatsPPSOut: + case ipvsStatsPPSOut: s.PPSOut = native.Uint32(attr.Value) - case ipvsSvcStatsBPSIn: + case ipvsStatsBPSIn: s.BPSIn = native.Uint32(attr.Value) - case ipvsSvcStatsBPSOut: + case ipvsStatsBPSOut: s.BPSOut = native.Uint32(attr.Value) } } @@ -314,7 +309,6 @@ func assembleStats(msg []byte) (SvcStats, error) { // assembleService assembles a services back from a hain of netlink attributes func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { - var s Service var addressBytes []byte @@ -366,10 +360,9 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { // parseService given a ipvs netlink response this function will respond with a valid service entry, an error otherwise func (i *Handle) parseService(msg []byte) (*Service, error) { - var s *Service - //Remove General header for this message and parse the NetLink message + // Remove General header for this message and parse the NetLink message hdr := deserializeGenlMsg(msg) NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) if err != nil { @@ -379,13 +372,13 @@ func (i *Handle) parseService(msg []byte) (*Service, error) { return nil, fmt.Errorf("error no valid netlink message found while parsing service record") } - //Now Parse and get IPVS related attributes messages packed in this message. + // Now Parse and get IPVS related attributes messages packed in this message. 
ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value) if err != nil { return nil, err } - //Assemble all the IPVS related attribute messages and create a service record + // Assemble all the IPVS related attribute messages and create a service record s, err = assembleService(ipvsAttrs) if err != nil { return nil, err @@ -422,7 +415,6 @@ func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) { } func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) { - var d Destination var addressBytes []byte @@ -447,9 +439,9 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) case ipvsDestAttrLowerThreshold: d.LowerThreshold = native.Uint32(attr.Value) case ipvsDestAttrActiveConnections: - d.ActiveConnections = int(native.Uint16(attr.Value)) + d.ActiveConnections = int(native.Uint32(attr.Value)) case ipvsDestAttrInactiveConnections: - d.InactiveConnections = int(native.Uint16(attr.Value)) + d.InactiveConnections = int(native.Uint32(attr.Value)) case ipvsDestAttrStats: stats, err := assembleStats(attr.Value) if err != nil { @@ -486,9 +478,12 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) // getIPFamily parses the IP family based on raw data from netlink. // For AF_INET, netlink will set the first 4 bytes with trailing zeros -// 10.0.0.1 -> [10 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0] +// +// 10.0.0.1 -> [10 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0] +// // For AF_INET6, the full 16 byte array is used: -// 2001:db8:3c4d:15::1a00 -> [32 1 13 184 60 77 0 21 0 0 0 0 0 0 26 0] +// +// 2001:db8:3c4d:15::1a00 -> [32 1 13 184 60 77 0 21 0 0 0 0 0 0 26 0] func getIPFamily(address []byte) (uint16, error) { if len(address) == 4 { return syscall.AF_INET, nil @@ -519,7 +514,7 @@ func isZeros(b []byte) bool { func (i *Handle) parseDestination(msg []byte) (*Destination, error) { var dst *Destination - //Remove General header for this message + // Remove General header for this message hdr := deserializeGenlMsg(msg) NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) if err != nil { @@ -529,13 +524,13 @@ func (i *Handle) parseDestination(msg []byte) (*Destination, error) { return nil, fmt.Errorf("error no valid netlink message found while parsing destination record") } - //Now Parse and get IPVS related attributes messages packed in this message. + // Now Parse and get IPVS related attributes messages packed in this message. 
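The getIPFamily comment reflowed in the hunk above documents how netlink hands addresses back: IPv4 arrives either as a bare 4-byte value or as 16 bytes with the tail zero-padded, while IPv6 uses the full 16 bytes. A simplified, self-contained re-implementation of that heuristic (for illustration only, not the vendored function):

```go
package main

import (
	"fmt"
	"syscall"
)

// ipFamily is a simplified sketch of the heuristic described in the comment:
// 4 bytes means IPv4; 16 bytes is IPv4 if everything after the first 4 bytes
// is zero padding, otherwise IPv6.
func ipFamily(address []byte) (uint16, error) {
	switch len(address) {
	case 4:
		return syscall.AF_INET, nil
	case 16:
		for _, b := range address[4:] {
			if b != 0 {
				return syscall.AF_INET6, nil
			}
		}
		return syscall.AF_INET, nil
	default:
		return 0, fmt.Errorf("unexpected address length %d", len(address))
	}
}

func main() {
	padded := []byte{10, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // 10.0.0.1 as padded by netlink
	fam, _ := ipFamily(padded)
	fmt.Println(fam == syscall.AF_INET) // true
}
```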
ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value) if err != nil { return nil, err } - //Assemble netlink attributes and create a Destination record + // Assemble netlink attributes and create a Destination record dst, err = assembleDestination(ipvsAttrs) if err != nil { return nil, err @@ -546,7 +541,6 @@ func (i *Handle) parseDestination(msg []byte) (*Destination, error) { // doGetDestinationsCmd a wrapper function to be used by GetDestinations and GetDestination(d) apis func (i *Handle) doGetDestinationsCmd(s *Service, d *Destination) ([]*Destination, error) { - var res []*Destination msgs, err := i.doCmdwithResponse(s, d, ipvsCmdGetDest) @@ -568,7 +562,7 @@ func (i *Handle) doGetDestinationsCmd(s *Service, d *Destination) ([]*Destinatio func (i *Handle) parseConfig(msg []byte) (*Config, error) { var c Config - //Remove General header for this message + // Remove General header for this message hdr := deserializeGenlMsg(msg) attrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/cluster-autoscaler/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 6a7a91e55969..068edd052c17 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -12,10 +12,12 @@ type Spec struct { Root *Root `json:"root,omitempty"` // Hostname configures the container's hostname. Hostname string `json:"hostname,omitempty"` + // Domainname configures the container's domainname. + Domainname string `json:"domainname,omitempty"` // Mounts configures additional mounts (on top of Root). Mounts []Mount `json:"mounts,omitempty"` // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` // Annotations contains arbitrary metadata for the container. Annotations map[string]string `json:"annotations,omitempty"` @@ -27,6 +29,8 @@ type Spec struct { Windows *Windows `json:"windows,omitempty" platform:"windows"` // VM specifies configuration for virtual-machine-based containers. VM *VM `json:"vm,omitempty" platform:"vm"` + // ZOS is platform-specific configuration for z/OS based containers. + ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } // Process contains information to start a specific application inside the container. @@ -49,7 +53,7 @@ type Process struct { // Capabilities are Linux capabilities that are kept for the process. Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` // ApparmorProfile specifies the apparmor profile for the container. @@ -86,11 +90,11 @@ type Box struct { // User specifies specific user (and group) information for the container process. type User struct { // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris"` + UID uint32 `json:"uid" platform:"linux,solaris,zos"` // GID is the group id. 
- GID uint32 `json:"gid" platform:"linux,solaris"` + GID uint32 `json:"gid" platform:"linux,solaris,zos"` // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"` + Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` // AdditionalGids are additional group ids set for the container's process. AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` // Username is the user name. @@ -110,11 +114,16 @@ type Mount struct { // Destination is the absolute path where the mount will be placed in the container. Destination string `json:"destination"` // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris"` + Type string `json:"type,omitempty" platform:"linux,solaris,zos"` // Source specifies the source path of the mount. Source string `json:"source,omitempty"` // Options are fstab style mount options. Options []string `json:"options,omitempty"` + + // UID/GID mappings used for changing file owners w/o calling chown, fs should support it. + // Every mount point could have its own mapping. + UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"` + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"` } // Hook specifies a command that is run at a particular event in the lifecycle of a container @@ -178,7 +187,7 @@ type Linux struct { // MountLabel specifies the selinux context for the mounts in the container. MountLabel string `json:"mountLabel,omitempty"` // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints (e.g., L3 cache, memory bandwidth) for the container + // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` @@ -250,8 +259,8 @@ type LinuxInterfacePriority struct { Priority uint32 `json:"priority"` } -// linuxBlockIODevice holds major:minor format supported in blkio cgroup -type linuxBlockIODevice struct { +// LinuxBlockIODevice holds major:minor format supported in blkio cgroup +type LinuxBlockIODevice struct { // Major is the device's major number. Major int64 `json:"major"` // Minor is the device's minor number. @@ -260,7 +269,7 @@ type linuxBlockIODevice struct { // LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice type LinuxWeightDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Weight is the bandwidth rate for the device. Weight *uint16 `json:"weight,omitempty"` // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only @@ -269,7 +278,7 @@ type LinuxWeightDevice struct { // LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair type LinuxThrottleDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Rate is the IO rate limit per cgroup per device Rate uint64 `json:"rate"` } @@ -310,6 +319,10 @@ type LinuxMemory struct { DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` // Enables hierarchical memory accounting UseHierarchy *bool `json:"useHierarchy,omitempty"` + // CheckBeforeUpdate enables checking if a new memory limit is lower + // than the current usage during update, and if so, rejecting the new + // limit. 
+ CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` } // LinuxCPU for Linux cgroup 'cpu' resource management @@ -328,6 +341,8 @@ type LinuxCPU struct { Cpus string `json:"cpus,omitempty"` // List of memory nodes in the cpuset. Default is to use any available memory node. Mems string `json:"mems,omitempty"` + // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE. + Idle *int64 `json:"idle,omitempty"` } // LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) @@ -522,11 +537,21 @@ type WindowsMemoryResources struct { // WindowsCPUResources contains CPU resource management settings. type WindowsCPUResources struct { - // Number of CPUs available to the container. + // Count is the number of CPUs available to the container. It represents the + // fraction of the configured processor `count` in a container in relation + // to the processors available in the host. The fraction ultimately + // determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. Count *uint64 `json:"count,omitempty"` - // CPU shares (relative weight to other containers with cpu shares). + // Shares limits the share of processor time given to the container relative + // to other workloads on the processor. The processor `shares` (`weight` at + // the platform level) is a value between 0 and 10000. Shares *uint16 `json:"shares,omitempty"` - // Specifies the portion of processor cycles that this container can use as a percentage times 100. + // Maximum determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. Set processor `maximum` to a percentage times + // 100. Maximum *uint16 `json:"maximum,omitempty"` } @@ -613,6 +638,23 @@ type Arch string // LinuxSeccompFlag is a flag to pass to seccomp(2). type LinuxSeccompFlag string +const ( + // LinuxSeccompFlagLog is a seccomp flag to request all returned + // actions except SECCOMP_RET_ALLOW to be logged. An administrator may + // override this filter flag by preventing specific actions from being + // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since + // Linux 4.14) + LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" + + // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store + // Bypass mitigation. (since Linux 4.17) + LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" + + // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait + // killable semantics. (since Linux 5.19) + LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV" +) + // Additional architectures permitted to be used for system calls // By default only the native architecture of the kernel is permitted const ( @@ -683,8 +725,9 @@ type LinuxSyscall struct { Args []LinuxSeccompArg `json:"args,omitempty"` } -// LinuxIntelRdt has container runtime resource constraints for Intel RDT -// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel +// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA +// features and flags enabling Intel RDT CMT and MBM features. +// Intel RDT features are available in Linux 4.14 and newer kernel versions. 
type LinuxIntelRdt struct { // The identity for RDT Class of Service ClosID string `json:"closID,omitempty"` @@ -697,4 +740,36 @@ type LinuxIntelRdt struct { // The unit of memory bandwidth is specified in "percentages" by // default, and in "MBps" if MBA Software Controller is enabled. MemBwSchema string `json:"memBwSchema,omitempty"` + + // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of + // the last-level cache (LLC) occupancy for the container. + EnableCMT bool `json:"enableCMT,omitempty"` + + // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of + // total and local memory bandwidth for the container. + EnableMBM bool `json:"enableMBM,omitempty"` +} + +// ZOS contains platform-specific configuration for z/OS based containers. +type ZOS struct { + // Devices are a list of device nodes that are created for the container + Devices []ZOSDevice `json:"devices,omitempty"` +} + +// ZOSDevice represents the mknod information for a z/OS special device file +type ZOSDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` } diff --git a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/README.md b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa406f..b042c896f25b 100644 --- a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/README.md +++ b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. 
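Among the runtime-spec additions above are per-mount UID/GID mappings on specs.Mount, which let a runtime present host files under container ownership without a recursive chown. A hedged usage sketch follows; the specs-go import path and the LinuxIDMapping field names (ContainerID, HostID, Size) are assumed from the existing runtime-spec types, since they are not shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	m := specs.Mount{
		Destination: "/data",
		Type:        "bind",
		Source:      "/var/lib/data",
		Options:     []string{"rbind", "rw"},
		// New in this vendored update: per-mount ID mappings, so files can
		// appear owned by the container's users without chowning the source.
		// LinuxIDMapping field names are assumed from the existing spec types.
		UIDMappings: []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}},
		GIDMappings: []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}},
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}
```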
@@ -341,7 +341,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/buffer_pool.go b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec07d8b..c7787f77cbfa 100644 --- a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. func SetBufferPool(bp BufferPool) { diff --git a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/entry.go b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa7249..71cdbbc35d21 100644 --- a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/entry.go +++ b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/logger.go b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/logger.go index 337704457a28..5ff0aef6d3f1 100644 --- a/cluster-autoscaler/vendor/github.com/sirupsen/logrus/logger.go +++ b/cluster-autoscaler/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc + // The buffer pool used to format the log. 
If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml index a749ac5492e5..7746f516da20 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml @@ -25,6 +25,6 @@ tasks: - go test -race ./... test-coverage: - desc: Runs go tests and calucates test coverage + desc: Runs go tests and calculates test coverage cmds: - go test -race -coverprofile=c.out ./... diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md b/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md index 6b45cfb892e5..91057c10a5ee 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md @@ -23,6 +23,7 @@ import ( "fmt" "net" "runtime" + "github.com/vishvananda/netns" ) @@ -48,3 +49,14 @@ func main() { } ``` + +## NOTE + +The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676). + +After locking a goroutine to its current OS thread with `runtime.LockOSThread()` +and changing its network namespace, any new subsequent goroutine won't be +scheduled on that thread while it's locked. Therefore, the new goroutine +will run in a different namespace leading to unexpected results. + +See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details. diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/doc.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 000000000000..cd4093a4d7e9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. 
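The logrus hunks above (entry.go and logger.go, before the objx and netns files) make the formatting buffer pool configurable per Logger via the new BufferPool field and SetBufferPool method, with getBufferPool falling back to the package-level pool. A minimal usage sketch, assuming only the Get/Put BufferPool interface already defined in logrus's buffer_pool.go:

```go
package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// syncPool adapts sync.Pool to the logrus.BufferPool interface
// (Get() *bytes.Buffer / Put(*bytes.Buffer)), the same shape the
// getBufferPool fallback added in entry.go expects.
type syncPool struct{ p sync.Pool }

func (s *syncPool) Get() *bytes.Buffer  { return s.p.Get().(*bytes.Buffer) }
func (s *syncPool) Put(b *bytes.Buffer) { s.p.Put(b) }

func main() {
	logger := logrus.New()
	// New in this vendored update: the pool can be set per logger instead of
	// only via the package-level logrus.SetBufferPool.
	logger.SetBufferPool(&syncPool{p: sync.Pool{
		New: func() interface{} { return new(bytes.Buffer) },
	}})
	logger.Info("formatted through the custom buffer pool")
}
```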
+package netns diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go index c76acd087020..ae0558d2903d 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go @@ -1,5 +1,3 @@ -// +build linux - package netns import ( @@ -10,24 +8,25 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "golang.org/x/sys/unix" ) -// Deprecated: use syscall pkg instead (go >= 1.5 needed). +// Deprecated: use golang.org/x/sys/unix pkg instead. const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ - bindMountPath = "/run/netns" /* Bind mount path for named netns */ + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ ) -// Setns sets namespace using syscall. Note that this should be a method -// in syscall but it has not been added. +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. func Setns(ns NsHandle, nstype int) (err error) { return unix.Setns(int(ns), nstype) } @@ -35,19 +34,20 @@ func Setns(ns NsHandle, nstype int) (err error) { // Set sets the current network namespace to the namespace represented // by NsHandle. func Set(ns NsHandle) (err error) { - return Setns(ns, CLONE_NEWNET) + return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. 
func New() (ns NsHandle, err error) { - if err := unix.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } return Get() } -// NewNamed creates a new named network namespace and returns a handle to it +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { err = os.MkdirAll(bindMountPath, 0755) @@ -65,13 +65,15 @@ func NewNamed(name string) (NsHandle, error) { f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) if err != nil { + newNs.Close() return None(), err } f.Close() - nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) - err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") if err != nil { + newNs.Close() return None(), err } @@ -82,7 +84,7 @@ func NewNamed(name string) (NsHandle, error) { func DeleteNamed(name string) error { namedPath := path.Join(bindMountPath, name) - err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + err := unix.Unmount(namedPath, unix.MNT_DETACH) if err != nil { return err } @@ -108,7 +110,7 @@ func GetFromPath(path string) (NsHandle, error) { // GetFromName gets a handle to a named network namespace such as one // created by `ip netns add`. func GetFromName(name string) (NsHandle, error) { - return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) + return GetFromPath(filepath.Join(bindMountPath, name)) } // GetFromPid gets a handle to the network namespace of a given pid. @@ -133,32 +135,37 @@ func GetFromDocker(id string) (NsHandle, error) { } // borrowed from docker/utils/utils.go -func findCgroupMountpoint(cgroupType string) (string, error) { +func findCgroupMountpoint(cgroupType string) (int, string, error) { output, err := ioutil.ReadFile("/proc/mounts") if err != nil { - return "", err + return -1, "", err } // /proc/mounts has 6 fields per line, one mount per line, e.g. // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, " ") - if len(parts) == 6 && parts[2] == "cgroup" { - for _, opt := range strings.Split(parts[3], ",") { - if opt == cgroupType { - return parts[1], nil + if len(parts) == 6 { + switch parts[2] { + case "cgroup2": + return 2, parts[1], nil + case "cgroup": + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return 1, parts[1], nil + } } } } } - return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) + return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) } // Returns the relative path to the cgroup docker is running in. 
// borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self -func getThisCgroup(cgroupType string) (string, error) { +func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") if err != nil { return "", err @@ -178,7 +185,8 @@ func getThisCgroup(cgroupType string) (string, error) { for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, ":") // any type used by docker should work - if parts[1] == cgroupType { + if (cgroupVer == 1 && parts[1] == cgroupType) || + (cgroupVer == 2 && parts[1] == "") { return parts[2], nil } } @@ -190,40 +198,56 @@ func getThisCgroup(cgroupType string) (string, error) { // modified to only return the first pid // modified to glob with id // modified to search for newer docker containers +// modified to look for cgroups v2 func getPidForContainer(id string) (int, error) { pid := 0 // memory is chosen randomly, any cgroup used by docker works cgroupType := "memory" - cgroupRoot, err := findCgroupMountpoint(cgroupType) + cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType) if err != nil { return pid, err } - cgroupThis, err := getThisCgroup(cgroupType) + cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType) if err != nil { return pid, err } id += "*" + var pidFile string + if cgroupVer == 1 { + pidFile = "tasks" + } else if cgroupVer == 2 { + pidFile = "cgroup.procs" + } else { + return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) + } + attempts := []string{ - filepath.Join(cgroupRoot, cgroupThis, id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, id, pidFile), // With more recent lxc versions use, cgroup will be in lxc/ - filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile), // With more recent docker, cgroup will be in docker/ - filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile), // Even more recent docker versions under systemd use docker-.scope/ - filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile), // Even more recent docker versions under cgroup/systemd/docker// - filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), - // Kubernetes with docker and CNI is even more different - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), - // Another flavor of containers location in recent kubernetes 1.11+ - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "kubepods-besteffort.slice", "*", "docker-"+id+".scope", "tasks"), - // When runs inside of a container with recent kubernetes 1.11+ - filepath.Join(cgroupRoot, "kubepods.slice", "kubepods-besteffort.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile), + // Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile), + // Another flavor of containers location in recent kubernetes 1.11+. 
Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), + // When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), } var filename string diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_unspecified.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_others.go similarity index 63% rename from cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_unspecified.go rename to cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_others.go index d06af62b68ad..0489837741bc 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_unspecified.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netns @@ -10,6 +11,14 @@ var ( ErrNotImplemented = errors.New("not implemented") ) +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + func Set(ns NsHandle) (err error) { return ErrNotImplemented } @@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) { return -1, ErrNotImplemented } +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + func Get() (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go similarity index 76% rename from cluster-autoscaler/vendor/github.com/vishvananda/netns/netns.go rename to cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go index 116befd548c4..0e1117106859 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -1,11 +1,3 @@ -// Package netns allows ultra-simple network namespace handling. NsHandles -// can be retrieved and set. Note that the current namespace is thread -// local so actions that set and reset namespaces should use LockOSThread -// to make sure the namespace doesn't change due to a goroutine switch. -// It is best to close NsHandles when you are done with them. This can be -// accomplished via a `defer ns.Close()` on the handle. Changing namespaces -// requires elevated privileges, so in most cases this code needs to be run -// as root. 
package netns import ( @@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error { if err := unix.Close(int(*ns)); err != nil { return err } - (*ns) = -1 + *ns = -1 return nil } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 000000000000..7a87acbe201a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(None)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/cluster-autoscaler/vendor/github.com/vmware/govmomi/CONTRIBUTORS index 6a421d09912e..ef8c56de8b27 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/CONTRIBUTORS +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -5,104 +5,252 @@ Abhijeet Kasurde abrarshivani +Adam Chalkley +Adam Fowler Adam Shannon +Akanksha Panse +Al Biheiri Alessandro Cortiana +Alex Alex Bozhenko Alex Ellis (VMware) +Aligator <8278538+yet-another-aligator@users.noreply.github.com> Alvaro Miranda Amanda H. L. 
de Andrade +amanpaha Amit Bathla amit bezalel Andrew Andrew Chin Andrew Kutz +Andrey Klimentyev Anfernee Yongkun Gui angystardust aniketGslab +Ankit Vaidya +Ankur Huralikoppi +Anna Carrigan +Antony Saba +Ariel Chinn Arran Walker +Artem Anisimov +Arunesh Pandey Aryeh Weinreb +Augy StClair Austin Parker Balu Dontu bastienbc +Ben Corrie +Ben Vickers +Benjamin Davini +Benjamin Peterson +Benjamin Vickers +Bhavya Choudhary Bob Killen Brad Fitzpatrick +Brian Rak +brian57860 Bruce Downs +Bryan Venteicher Cédric Blomart +Cheng Cheng +Chethan Venkatesh +Choudhury Sarada Prasanna Nanda Chris Marchesi Christian Höltje Clint Greenwood +cpiment CuiHaozhi +Dan Ilan +Dan Norris +Daniel Frederick Crisman +Daniel Mueller Danny Lockard +Dave Gress +Dave Smith-Uchida Dave Tucker -Davide Agnello +David Gress David Stark +Davide Agnello Davinder Kumar +Defa +demarey +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Deric Crago +ditsuke +Divyen Patel +Dnyanesh Gate Doug MacEachern +East <60801291+houfangdong@users.noreply.github.com> Eloy Coto +embano1 +Eng Zer Jun +Eric Edens +Eric Graham <16710890+Pheric@users.noreply.github.com> Eric Gray Eric Yutao Erik Hollensbe +Essodjolo KAHANAM Ethan Kaley +Evan Chu Fabio Rapposelli -Faiyaz Ahmed +Faiyaz Ahmed +Federico Pellegatta <12744504+federico-pellegatta@users.noreply.github.com> forkbomber +François Rigault freebsdly Gavin Gray Gavrie Philipson George Hicken Gerrit Renker gthombare +HakanSunay Hasan Mahmood +Haydon Ryan +Heiko Reese Henrik Hodne +hkumar +Hrabur Stoyanov +hui luo +Ian Eyberg Isaac Rodman +Ivan Mikushin Ivan Porto Carrero James King +James Peach Jason Kincl Jeremy Canady jeremy-clerc Jiatong Wang +jingyizPensando João Pereira Jonas Ausevicius Jorge Sevilla +Julien PILLON +Justin J. Novack kayrus +Keenan Brock Kevin George +Knappek +Leslie Wang leslie-qiwa +Lintong Jiang +Liping Xue Louie Jiang +Luther Monson +Madanagopal Arunachalam +makelarisjr <8687447+makelarisjr@users.noreply.github.com> maplain Marc Carmier +Marcus Tan Maria Ntalla Marin Atanasov Nikolov +Mario Trangoni +Mark Dechiaro +Mark Peek +Mark Rexwinkel +martin Matt Clay -Matthew Cosgrove +Matt Moore Matt Moriarity +Matthew Cosgrove +mbhadale +Merlijn Sebrechts Mevan Samaratunga +Michael Gasch <15986659+embano1@users.noreply.github.com> +Michael Gasch Michal Jankowski +Mike Schinkel +Mincho Tonev mingwei Nicolas Lamirault +Nikhil Kathare +Nikhil R Deshpande +Nikolas Grottendieck +Nils Elde +nirbhay +Nobuhiro MIKI +Om Kumar Omar Kohl Parham Alvani +Parveen Chahal +Paul Martin <25058109+rawstorage@users.noreply.github.com> Pierre Gronlier Pieter Noordhuis +pradeepj <50135054+pradeep288@users.noreply.github.com> +Pranshu Jain prydin +rconde01 +rHermes +Rianto Wahyudi +Ricardo Katz +Robin Watkins Rowan Jacobs +Roy Ling +rsikdar runner.mei +Ryan Johnson +S R Ashrith S.Çağlar Onur +Saad Malik +Sam Zhu +samzhu333 <45263849+samzhu333@users.noreply.github.com> +Sandeep Pissay Srinivasa Rao +Scott Holden Sergey Ignatov +serokles +shahra +Shalini Bhaskara +Shaozhen Ding +Shawn Neal +shylasrinivas +sky-joker +smaftoul +smahadik +Sten Feldman +Stepan Mazurov Steve Purcell +Sudhindra Aithal +SUMIT AGRAWAL +Sunny Carter +syuparn Takaaki Furukawa Tamas Eger +Tanay Kothari tanishi Ted Zlatanov +Thad Craft Thibaut Ackermann +Tim McNamara +Tjeu Kayim <15987676+TjeuKayim@users.noreply.github.com> +Toomas Pelberg Trevor Dawe +tshihad Uwe Bessle Vadim Egorov Vikram Krishnamurthy +volanja Volodymyr Bobyr +Waldek Maleska +William Lam Witold Krecicki +xing-yang +xinyanw409 Yang Yang +yangxi 
+Yann Hodique +Yash Nitin Desai +Yassine TIJANI +Yi Jiang +yiyingy +ykakarap +Yogesh Sobale <6104071+ysobale@users.noreply.github.com> +Yue Yin +Yun Zhou Yuya Kusakabe -Zacharias Taubert +Zach G Zach Tucker +Zacharias Taubert Zee Yang +zyuxin +Кузаков Евгений diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go index 03767fc353b6..61ac780c45c0 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/find/finder.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2020 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,9 +22,11 @@ import ( "path" "strings" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/list" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" @@ -38,16 +40,26 @@ type Finder struct { folders *object.DatacenterFolders } -func NewFinder(client *vim25.Client, all bool) *Finder { +func NewFinder(client *vim25.Client, all ...bool) *Finder { + props := false + if len(all) == 1 { + props = all[0] + } + f := &Finder{ client: client, si: object.NewSearchIndex(client), r: recurser{ Collector: property.DefaultCollector(client), - All: all, + All: props, }, } + if len(all) == 0 { + // attempt to avoid SetDatacenter() requirement + f.dc, _ = f.DefaultDatacenter(context.Background()) + } + return f } @@ -57,6 +69,18 @@ func (f *Finder) SetDatacenter(dc *object.Datacenter) *Finder { return f } +// InventoryPath composes the given object's inventory path. +// There is no vSphere property or method that provides an inventory path directly. +// This method uses the ManagedEntity.Parent field to determine the ancestry tree of the object and +// the ManagedEntity.Name field for each ancestor to compose the path. +func InventoryPath(ctx context.Context, client *vim25.Client, obj types.ManagedObjectReference) (string, error) { + entities, err := mo.Ancestors(ctx, client, client.ServiceContent.PropertyCollector, obj) + if err != nil { + return "", err + } + return internal.InventoryPath(entities), nil +} + // findRoot makes it possible to use "find" mode with a different root path. // Example: ResourcePoolList("/dc1/host/cluster1/...") func (f *Finder) findRoot(ctx context.Context, root *list.Element, parts []string) bool { @@ -93,8 +117,8 @@ func (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, isPath := strings.Contains(arg, "/") root := list.Element{ - Path: "/", Object: object.NewRootFolder(f.client), + Path: "/", } parts := list.ToParts(arg) @@ -109,19 +133,10 @@ func (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, return nil, err } - mes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, pivot.Reference()) + root.Path, err = InventoryPath(ctx, f.client, pivot.Reference()) if err != nil { return nil, err } - - for _, me := range mes { - // Skip root entity in building inventory path. 
- if me.Parent == nil { - continue - } - root.Path = path.Join(root.Path, me.Name) - } - root.Object = pivot parts = parts[1:] } @@ -253,7 +268,7 @@ func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, in fn = f.dcReference } - if len(path) == 0 { + if path == "" { path = "." } @@ -271,8 +286,7 @@ func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, in return f.find(ctx, path, s) } -// Element returns an Element for the given ManagedObjectReference -// This method is only useful for looking up the InventoryPath of a ManagedObjectReference. +// Element is deprecated, use InventoryPath() instead. func (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) (*list.Element, error) { rl := func(_ context.Context) (object.Reference, error) { return ref, nil @@ -301,7 +315,7 @@ func (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) // ObjectReference converts the given ManagedObjectReference to a type from the object package via object.NewReference // with the object.Common.InventoryPath field set. func (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectReference) (object.Reference, error) { - e, err := f.Element(ctx, ref) + path, err := InventoryPath(ctx, f.client, ref) if err != nil { return nil, err } @@ -312,7 +326,7 @@ func (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectRef SetInventoryPath(string) } - r.(common).SetInventoryPath(e.Path) + r.(common).SetInventoryPath(path) if f.dc != nil { if ds, ok := r.(*object.Datastore); ok { @@ -776,9 +790,26 @@ func (f *Finder) NetworkList(ctx context.Context, path string) ([]object.Network return ns, nil } +// Network finds a NetworkReference using a Name, Inventory Path, ManagedObject ID, Logical Switch UUID or Segment ID. +// With standard vSphere networking, Portgroups cannot have the same name within the same network folder. +// With NSX, Portgroups can have the same name, even within the same Switch. In this case, using an inventory path +// results in a MultipleFoundError. A MOID, switch UUID or segment ID can be used instead, as both are unique. 
+// See also: https://kb.vmware.com/s/article/79872#Duplicate_names +// Examples: +// - Name: "dvpg-1" +// - Inventory Path: "vds-1/dvpg-1" +// - ManagedObject ID: "DistributedVirtualPortgroup:dvportgroup-53" +// - Logical Switch UUID: "da2a59b8-2450-4cb2-b5cc-79c4c1d2144c" +// - Segment ID: "/infra/segments/vnet_ce50e69b-1784-4a14-9206-ffd7f1f146f7" func (f *Finder) Network(ctx context.Context, path string) (object.NetworkReference, error) { networks, err := f.NetworkList(ctx, path) if err != nil { + if _, ok := err.(*NotFoundError); ok { + net, nerr := f.networkByID(ctx, path) + if nerr == nil { + return net, nil + } + } return nil, err } @@ -789,6 +820,41 @@ func (f *Finder) Network(ctx context.Context, path string) (object.NetworkRefere return networks[0], nil } +func (f *Finder) networkByID(ctx context.Context, path string) (object.NetworkReference, error) { + if ref := object.ReferenceFromString(path); ref != nil { + // This is a MOID + return object.NewReference(f.client, *ref).(object.NetworkReference), nil + } + + kind := []string{"DistributedVirtualPortgroup"} + + m := view.NewManager(f.client) + v, err := m.CreateContainerView(ctx, f.client.ServiceContent.RootFolder, kind, true) + if err != nil { + return nil, err + } + defer v.Destroy(ctx) + + filter := property.Filter{ + "config.logicalSwitchUuid": path, + "config.segmentId": path, + } + + refs, err := v.FindAny(ctx, kind, filter) + if err != nil { + return nil, err + } + + if len(refs) == 0 { + return nil, &NotFoundError{"network", path} + } + if len(refs) > 1 { + return nil, &MultipleFoundError{"network", path} + } + + return object.NewReference(f.client, refs[0]).(object.NetworkReference), nil +} + func (f *Finder) DefaultNetwork(ctx context.Context) (object.NetworkReference, error) { network, err := f.Network(ctx, "*") if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/history/collector.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/history/collector.go new file mode 100644 index 000000000000..9b25ea85a14a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/history/collector.go @@ -0,0 +1,91 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package history + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Collector struct { + r types.ManagedObjectReference + c *vim25.Client +} + +func NewCollector(c *vim25.Client, ref types.ManagedObjectReference) *Collector { + return &Collector{ + r: ref, + c: c, + } +} + +// Reference returns the managed object reference of this collector +func (c Collector) Reference() types.ManagedObjectReference { + return c.r +} + +// Client returns the vim25 client used by this collector +func (c Collector) Client() *vim25.Client { + return c.c +} + +// Properties wraps property.DefaultCollector().RetrieveOne() and returns +// properties for the specified managed object reference +func (c Collector) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error { + return property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst) +} + +func (c Collector) Destroy(ctx context.Context) error { + req := types.DestroyCollector{ + This: c.r, + } + + _, err := methods.DestroyCollector(ctx, c.c, &req) + return err +} + +func (c Collector) Reset(ctx context.Context) error { + req := types.ResetCollector{ + This: c.r, + } + + _, err := methods.ResetCollector(ctx, c.c, &req) + return err +} + +func (c Collector) Rewind(ctx context.Context) error { + req := types.RewindCollector{ + This: c.r, + } + + _, err := methods.RewindCollector(ctx, c.c, &req) + return err +} + +func (c Collector) SetPageSize(ctx context.Context, maxCount int32) error { + req := types.SetCollectorPageSize{ + This: c.r, + MaxCount: maxCount, + } + + _, err := methods.SetCollectorPageSize(ctx, c.c, &req) + return err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/helpers.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/helpers.go new file mode 100644 index 000000000000..b3eafeadfd92 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "net" + "path" + + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// InventoryPath composed of entities by Name +func InventoryPath(entities []mo.ManagedEntity) string { + val := "/" + + for _, entity := range entities { + // Skip root folder in building inventory path. 
+ if entity.Parent == nil { + continue + } + val = path.Join(val, entity.Name) + } + + return val +} + +func HostSystemManagementIPs(config []types.VirtualNicManagerNetConfig) []net.IP { + var ips []net.IP + + for _, nc := range config { + if nc.NicType != string(types.HostVirtualNicManagerNicTypeManagement) { + continue + } + for ix := range nc.CandidateVnic { + for _, selectedVnicKey := range nc.SelectedVnic { + if nc.CandidateVnic[ix].Key != selectedVnicKey { + continue + } + ip := net.ParseIP(nc.CandidateVnic[ix].Spec.Ip.IpAddress) + if ip != nil { + ips = append(ips, ip) + } + } + } + } + + return ips +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/methods.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/methods.go new file mode 100644 index 000000000000..95ccee8d2471 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/methods.go @@ -0,0 +1,123 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" +) + +type RetrieveDynamicTypeManagerBody struct { + Req *RetrieveDynamicTypeManagerRequest `xml:"urn:vim25 RetrieveDynamicTypeManager"` + Res *RetrieveDynamicTypeManagerResponse `xml:"urn:vim25 RetrieveDynamicTypeManagerResponse"` + Fault_ *soap.Fault +} + +func (b *RetrieveDynamicTypeManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDynamicTypeManager(ctx context.Context, r soap.RoundTripper, req *RetrieveDynamicTypeManagerRequest) (*RetrieveDynamicTypeManagerResponse, error) { + var reqBody, resBody RetrieveDynamicTypeManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveManagedMethodExecuterBody struct { + Req *RetrieveManagedMethodExecuterRequest `xml:"urn:vim25 RetrieveManagedMethodExecuter"` + Res *RetrieveManagedMethodExecuterResponse `xml:"urn:vim25 RetrieveManagedMethodExecuterResponse"` + Fault_ *soap.Fault +} + +func (b *RetrieveManagedMethodExecuterBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveManagedMethodExecuter(ctx context.Context, r soap.RoundTripper, req *RetrieveManagedMethodExecuterRequest) (*RetrieveManagedMethodExecuterResponse, error) { + var reqBody, resBody RetrieveManagedMethodExecuterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DynamicTypeMgrQueryMoInstancesBody struct { + Req *DynamicTypeMgrQueryMoInstancesRequest `xml:"urn:vim25 DynamicTypeMgrQueryMoInstances"` + Res *DynamicTypeMgrQueryMoInstancesResponse `xml:"urn:vim25 DynamicTypeMgrQueryMoInstancesResponse"` + Fault_ *soap.Fault +} + +func (b *DynamicTypeMgrQueryMoInstancesBody) Fault() *soap.Fault { return b.Fault_ } + +func DynamicTypeMgrQueryMoInstances(ctx context.Context, r soap.RoundTripper, req 
*DynamicTypeMgrQueryMoInstancesRequest) (*DynamicTypeMgrQueryMoInstancesResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryMoInstancesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DynamicTypeMgrQueryTypeInfoBody struct { + Req *DynamicTypeMgrQueryTypeInfoRequest `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfo"` + Res *DynamicTypeMgrQueryTypeInfoResponse `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfoResponse"` + Fault_ *soap.Fault +} + +func (b *DynamicTypeMgrQueryTypeInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func DynamicTypeMgrQueryTypeInfo(ctx context.Context, r soap.RoundTripper, req *DynamicTypeMgrQueryTypeInfoRequest) (*DynamicTypeMgrQueryTypeInfoResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryTypeInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteSoapBody struct { + Req *ExecuteSoapRequest `xml:"urn:vim25 ExecuteSoap"` + Res *ExecuteSoapResponse `xml:"urn:vim25 ExecuteSoapResponse"` + Fault_ *soap.Fault +} + +func (b *ExecuteSoapBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteSoap(ctx context.Context, r soap.RoundTripper, req *ExecuteSoapRequest) (*ExecuteSoapResponse, error) { + var reqBody, resBody ExecuteSoapBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/types.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/types.go new file mode 100644 index 000000000000..b84103b89bb2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/types.go @@ -0,0 +1,270 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "reflect" + + "github.com/vmware/govmomi/vim25/types" +) + +type DynamicTypeMgrQueryMoInstancesRequest struct { + This types.ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryMoInstancesResponse struct { + Returnval []DynamicTypeMgrMoInstance `xml:"urn:vim25 returnval"` +} + +type DynamicTypeEnumTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Value []string `xml:"value,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeEnumTypeInfo", reflect.TypeOf((*DynamicTypeEnumTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrAllTypeInfoRequest struct { + types.DynamicData + + ManagedTypeInfo []DynamicTypeMgrManagedTypeInfo `xml:"managedTypeInfo,omitempty"` + EnumTypeInfo []DynamicTypeEnumTypeInfo `xml:"enumTypeInfo,omitempty"` + DataTypeInfo []DynamicTypeMgrDataTypeInfo `xml:"dataTypeInfo,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrAllTypeInfo", reflect.TypeOf((*DynamicTypeMgrAllTypeInfoRequest)(nil)).Elem()) +} + +type DynamicTypeMgrAnnotation struct { + types.DynamicData + + Name string `xml:"name"` + Parameter []string `xml:"parameter,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrAnnotation", reflect.TypeOf((*DynamicTypeMgrAnnotation)(nil)).Elem()) +} + +type DynamicTypeMgrDataTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrDataTypeInfo", reflect.TypeOf((*DynamicTypeMgrDataTypeInfo)(nil)).Elem()) +} + +func (b *DynamicTypeMgrFilterSpec) GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec { return b } + +type BaseDynamicTypeMgrFilterSpec interface { + GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec +} + +type DynamicTypeMgrFilterSpec struct { + types.DynamicData +} + +func init() { + types.Add("DynamicTypeMgrFilterSpec", reflect.TypeOf((*DynamicTypeMgrFilterSpec)(nil)).Elem()) +} + +type DynamicTypeMgrManagedTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Method []DynamicTypeMgrMethodTypeInfo `xml:"method,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrManagedTypeInfo", reflect.TypeOf((*DynamicTypeMgrManagedTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrMethodTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + ParamTypeInfo []DynamicTypeMgrParamTypeInfo `xml:"paramTypeInfo,omitempty"` + ReturnTypeInfo *DynamicTypeMgrParamTypeInfo `xml:"returnTypeInfo,omitempty"` + Fault []string `xml:"fault,omitempty"` + PrivId string `xml:"privId,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrMethodTypeInfo", reflect.TypeOf((*DynamicTypeMgrMethodTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrMoFilterSpec struct { + 
DynamicTypeMgrFilterSpec + + Id string `xml:"id,omitempty"` + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrMoFilterSpec", reflect.TypeOf((*DynamicTypeMgrMoFilterSpec)(nil)).Elem()) +} + +type DynamicTypeMgrMoInstance struct { + types.DynamicData + + Id string `xml:"id"` + MoType string `xml:"moType"` +} + +func init() { + types.Add("DynamicTypeMgrMoInstance", reflect.TypeOf((*DynamicTypeMgrMoInstance)(nil)).Elem()) +} + +type DynamicTypeMgrParamTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrParamTypeInfo", reflect.TypeOf((*DynamicTypeMgrParamTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrPropertyTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + MsgIdFormat string `xml:"msgIdFormat,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +type DynamicTypeMgrQueryTypeInfoRequest struct { + This types.ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryTypeInfoResponse struct { + Returnval DynamicTypeMgrAllTypeInfoRequest `xml:"urn:vim25 returnval"` +} + +func init() { + types.Add("DynamicTypeMgrPropertyTypeInfo", reflect.TypeOf((*DynamicTypeMgrPropertyTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrTypeFilterSpec struct { + DynamicTypeMgrFilterSpec + + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrTypeFilterSpec", reflect.TypeOf((*DynamicTypeMgrTypeFilterSpec)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapArgument struct { + types.DynamicData + + Name string `xml:"name"` + Val string `xml:"val"` +} + +func init() { + types.Add("ReflectManagedMethodExecuterSoapArgument", reflect.TypeOf((*ReflectManagedMethodExecuterSoapArgument)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapFault struct { + types.DynamicData + + FaultMsg string `xml:"faultMsg"` + FaultDetail string `xml:"faultDetail,omitempty"` +} + +func init() { + types.Add("ReflectManagedMethodExecuterSoapFault", reflect.TypeOf((*ReflectManagedMethodExecuterSoapFault)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapResult struct { + types.DynamicData + + Response string `xml:"response,omitempty"` + Fault *ReflectManagedMethodExecuterSoapFault `xml:"fault,omitempty"` +} + +type RetrieveDynamicTypeManagerRequest struct { + This types.ManagedObjectReference `xml:"_this"` +} + +type RetrieveDynamicTypeManagerResponse struct { + Returnval *InternalDynamicTypeManager `xml:"urn:vim25 returnval"` +} + +type RetrieveManagedMethodExecuterRequest struct { + This types.ManagedObjectReference `xml:"_this"` +} + +func init() { + types.Add("RetrieveManagedMethodExecuter", reflect.TypeOf((*RetrieveManagedMethodExecuterRequest)(nil)).Elem()) +} + +type RetrieveManagedMethodExecuterResponse struct { + Returnval *ReflectManagedMethodExecuter `xml:"urn:vim25 returnval"` +} + +type InternalDynamicTypeManager struct { + types.ManagedObjectReference +} + +type ReflectManagedMethodExecuter struct { + types.ManagedObjectReference +} + +type ExecuteSoapRequest struct { + This types.ManagedObjectReference `xml:"_this"` + Moid string `xml:"moid"` + Version string 
`xml:"version"` + Method string `xml:"method"` + Argument []ReflectManagedMethodExecuterSoapArgument `xml:"argument,omitempty"` +} + +type ExecuteSoapResponse struct { + Returnval *ReflectManagedMethodExecuterSoapResult `xml:"urn:vim25 returnval"` +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/version/version.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/version/version.go new file mode 100644 index 000000000000..b141a9597edf --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/internal/version/version.go @@ -0,0 +1,25 @@ +/* +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +const ( + // ClientName is the name of this SDK + ClientName = "govmomi" + + // ClientVersion is the version of this SDK + ClientVersion = "0.30.0" +) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/list/lister.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/list/lister.go index 2ee32e6bc174..9a4caed68625 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/list/lister.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/list/lister.go @@ -165,6 +165,8 @@ func (l Lister) List(ctx context.Context) ([]Element, error) { return l.ListHostSystem(ctx) case "VirtualApp": return l.ListVirtualApp(ctx) + case "VmwareDistributedVirtualSwitch", "DistributedVirtualSwitch": + return l.ListDistributedVirtualSwitch(ctx) default: return nil, fmt.Errorf("cannot traverse type " + l.Reference.Type) } @@ -497,6 +499,69 @@ func (l Lister) ListHostSystem(ctx context.Context) ([]Element, error) { return es, nil } +func (l Lister) ListDistributedVirtualSwitch(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "portgroup", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "DistributedVirtualSwitch", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "DistributedVirtualPortgroup", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + func (l Lister) ListVirtualApp(ctx context.Context) ([]Element, error) { ospec := types.ObjectSpec{ Obj: l.Reference, diff --git 
a/cluster-autoscaler/vendor/github.com/vmware/govmomi/lookup/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/lookup/client.go index 896909fceed2..b3c19846a10e 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/lookup/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/lookup/client.go @@ -25,6 +25,7 @@ import ( "github.com/vmware/govmomi/lookup/methods" "github.com/vmware/govmomi/lookup/types" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" vim "github.com/vmware/govmomi/vim25/types" @@ -47,12 +48,28 @@ var ( type Client struct { *soap.Client + RoundTripper soap.RoundTripper + ServiceContent types.LookupServiceContent } // NewClient returns a client targeting the SSO Lookup Service API endpoint. func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { - sc := c.Client.NewServiceClient(Path, Namespace) + // PSC may be external, attempt to derive from sts.uri + path := &url.URL{Path: Path} + if c.ServiceContent.Setting != nil { + m := object.NewOptionManager(c, *c.ServiceContent.Setting) + opts, err := m.Query(ctx, "config.vpxd.sso.sts.uri") + if err == nil && len(opts) == 1 { + u, err := url.Parse(opts[0].GetOptionValue().Value.(string)) + if err == nil { + path.Scheme = u.Scheme + path.Host = u.Host + } + } + } + + sc := c.Client.NewServiceClient(path.String(), Namespace) sc.Version = Version req := types.RetrieveServiceContent{ @@ -64,7 +81,12 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { return nil, err } - return &Client{sc, res.Returnval}, nil + return &Client{sc, sc, res.Returnval}, nil +} + +// RoundTrip dispatches to the RoundTripper field. 
+func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } func (c *Client) List(ctx context.Context, filter *types.LookupServiceRegistrationFilter) ([]types.LookupServiceRegistrationInfo, error) { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease.go index 3fb85ee6f6e1..457568033624 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease.go @@ -18,16 +18,15 @@ package nfc import ( "context" - "errors" "fmt" "io" "path" "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/task" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) @@ -195,7 +194,7 @@ func (l *Lease) Wait(ctx context.Context, items []types.OvfFileItem) (*LeaseInfo } if lease.Error != nil { - return nil, errors.New(lease.Error.LocalizedMessage) + return nil, &task.Error{LocalizedMethodFault: lease.Error} } return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) @@ -208,8 +207,6 @@ func (l *Lease) StartUpdater(ctx context.Context, info *LeaseInfo) *LeaseUpdater func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soap.Upload) error { if opts.Progress == nil { opts.Progress = item - } else { - opts.Progress = progress.Tee(item, opts.Progress) } // Non-disk files (such as .iso) use the PUT method. @@ -230,8 +227,6 @@ func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soa func (l *Lease) DownloadFile(ctx context.Context, file string, item FileItem, opts soap.Download) error { if opts.Progress == nil { opts.Progress = item - } else { - opts.Progress = progress.Tee(item, opts.Progress) } return l.c.DownloadFile(ctx, file, item.URL, &opts) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease_updater.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease_updater.go index d77c3596aeef..02ce9cf5372c 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease_updater.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/nfc/lease_updater.go @@ -57,8 +57,8 @@ func (o FileItem) File() types.OvfFile { } type LeaseUpdater struct { - pos int64 // Number of bytes (keep first to ensure 64 bit aligment) - total int64 // Total number of bytes (keep first to ensure 64 bit aligment) + pos int64 // Number of bytes (keep first to ensure 64 bit alignment) + total int64 // Total number of bytes (keep first to ensure 64 bit alignment) lease *Lease diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/authorization_manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/authorization_manager.go index b703258fe7a3..5cd6851a87dc 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/authorization_manager.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/authorization_manager.go @@ -172,3 +172,50 @@ func (m AuthorizationManager) UpdateRole(ctx context.Context, id int32, name str _, err := methods.UpdateAuthorizationRole(ctx, m.Client(), &req) return err } + +func (m 
AuthorizationManager) HasUserPrivilegeOnEntities(ctx context.Context, entities []types.ManagedObjectReference, userName string, privID []string) ([]types.EntityPrivilege, error) { + req := types.HasUserPrivilegeOnEntities{ + This: m.Reference(), + Entities: entities, + UserName: userName, + PrivId: privID, + } + + res, err := methods.HasUserPrivilegeOnEntities(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) HasPrivilegeOnEntity(ctx context.Context, entity types.ManagedObjectReference, sessionID string, privID []string) ([]bool, error) { + req := types.HasPrivilegeOnEntity{ + This: m.Reference(), + Entity: entity, + SessionId: sessionID, + PrivId: privID, + } + + res, err := methods.HasPrivilegeOnEntity(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) FetchUserPrivilegeOnEntities(ctx context.Context, entities []types.ManagedObjectReference, userName string) ([]types.UserPrivilegeResult, error) { + req := types.FetchUserPrivilegeOnEntities{ + This: m.Reference(), + Entities: entities, + UserName: userName, + } + + res, err := methods.FetchUserPrivilegeOnEntities(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go index 24c346825ad1..018372dfe080 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go @@ -87,3 +87,17 @@ func (c ClusterComputeResource) MoveInto(ctx context.Context, hosts ...*HostSyst return NewTask(c.c, res.Returnval), nil } + +func (c ClusterComputeResource) PlaceVm(ctx context.Context, spec types.PlacementSpec) (*types.PlacementResult, error) { + req := types.PlaceVm{ + This: c.Reference(), + PlacementSpec: spec, + } + + res, err := methods.PlaceVm(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/common.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/common.go index abb4076c7fa9..88ce78fce524 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/common.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/common.go @@ -75,29 +75,22 @@ func (c *Common) SetInventoryPath(p string) { c.InventoryPath = p } -// ObjectName returns the base name of the InventoryPath field if set, -// otherwise fetches the mo.ManagedEntity.Name field via the property collector. +// ObjectName fetches the mo.ManagedEntity.Name field via the property collector. func (c Common) ObjectName(ctx context.Context) (string, error) { - var o mo.ManagedEntity + var content []types.ObjectContent - err := c.Properties(ctx, c.Reference(), []string{"name"}, &o) + err := c.Properties(ctx, c.Reference(), []string{"name"}, &content) if err != nil { return "", err } - if o.Name != "" { - return o.Name, nil + for i := range content { + for _, prop := range content[i].PropSet { + return prop.Val.(string), nil + } } - // Network has its own "name" field... 
- var n mo.Network - - err = c.Properties(ctx, c.Reference(), []string{"name"}, &n) - if err != nil { - return "", err - } - - return n.Name, nil + return "", nil } // Properties is a wrapper for property.DefaultCollector().RetrieveOne() @@ -142,3 +135,14 @@ func (c Common) SetCustomValue(ctx context.Context, key string, value string) er _, err := methods.SetCustomValue(ctx, c.c, &req) return err } + +func ReferenceFromString(s string) *types.ManagedObjectReference { + var ref types.ManagedObjectReference + if !ref.FromString(s) { + return nil + } + if mo.IsManagedObjectType(ref.Type) { + return &ref + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go index cb8b965dc6e4..e9a3914d9da9 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go @@ -21,6 +21,7 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -36,6 +37,12 @@ func NewCustomizationSpecManager(c *vim25.Client) *CustomizationSpecManager { return &cs } +func (cs CustomizationSpecManager) Info(ctx context.Context) ([]types.CustomizationSpecInfo, error) { + var m mo.CustomizationSpecManager + err := cs.Properties(ctx, cs.Reference(), []string{"info"}, &m) + return m.Info, err +} + func (cs CustomizationSpecManager) DoesCustomizationSpecExist(ctx context.Context, name string) (bool, error) { req := types.DoesCustomizationSpecExist{ This: cs.Reference(), diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore.go index 46a99950a625..65264ae152dd 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore.go @@ -68,6 +68,11 @@ func NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore } func (d Datastore) Path(path string) string { + var p DatastorePath + if p.FromString(path) { + return p.String() // already in "[datastore] path" format + } + return (&DatastorePath{ Datastore: d.Name(), Path: path, diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_file.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_file.go index bc010fc36e81..86d7d9c728f5 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_file.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_file.go @@ -297,10 +297,8 @@ func (f *DatastoreFile) TailFunc(lines int, include func(line int, message strin nread = bsize + remain eof = true - } else { - if pos, err = f.Seek(offset, io.SeekEnd); err != nil { - return err - } + } else if pos, err = f.Seek(offset, io.SeekEnd); err != nil { + return err } if _, err = io.CopyN(buf, f, nread); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_path.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_path.go index 1563ee1e11db..104c7dfe35e6 100644 --- 
a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_path.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/datastore_path.go @@ -31,7 +31,7 @@ type DatastorePath struct { // FromString parses a datastore path. // Returns true if the path could be parsed, false otherwise. func (p *DatastorePath) FromString(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go index 5baf1ad90383..026dc1cb5e6f 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go @@ -71,10 +71,8 @@ func (m DiagnosticManager) GenerateLogBundles(ctx context.Context, includeDefaul IncludeDefault: includeDefault, } - if host != nil { - for _, h := range host { - req.Host = append(req.Host, h.Reference()) - } + for _, h := range host { + req.Host = append(req.Host, h.Reference()) } res, err := methods.GenerateLogBundles_Task(ctx, m.c, &req) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go index f8ac5512c1f0..c2abb8fabde7 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go @@ -36,6 +36,10 @@ func NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectRefe } } +func (p DistributedVirtualPortgroup) GetInventoryPath() string { + return p.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { var dvp mo.DistributedVirtualPortgroup @@ -46,9 +50,15 @@ func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context return nil, err } + // From the docs at https://code.vmware.com/apis/196/vsphere/doc/vim.dvs.DistributedVirtualPortgroup.ConfigInfo.html: // "This property should always be set unless the user's setting does not have System.Read privilege on the object referred to by this property." + // Note that "the object" refers to the Switch, not the PortGroup. 
if dvp.Config.DistributedVirtualSwitch == nil { - return nil, fmt.Errorf("no System.Read privilege on: %s.%s", p.Reference(), prop) + name := p.InventoryPath + if name == "" { + name = p.Reference().String() + } + return nil, fmt.Errorf("failed to create EthernetCardBackingInfo for %s: System.Read privilege required for %s", name, prop) } if err := p.Properties(ctx, *dvp.Config.DistributedVirtualSwitch, []string{"uuid"}, &dvs); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go index 526ce4bf7871..66650e1d06b6 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go @@ -18,6 +18,7 @@ package object import ( "context" + "fmt" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" @@ -34,8 +35,17 @@ func NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReferen } } +func (s DistributedVirtualSwitch) GetInventoryPath() string { + return s.InventoryPath +} + func (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - return nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder + ref := s.Reference() + name := s.InventoryPath + if name == "" { + name = ref.String() + } + return nil, fmt.Errorf("type %s (%s) cannot be used for EthernetCardBackingInfo", ref.Type, name) } func (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) { @@ -78,3 +88,31 @@ func (s DistributedVirtualSwitch) FetchDVPorts(ctx context.Context, criteria *ty } return res.Returnval, nil } + +func (s DistributedVirtualSwitch) ReconfigureDVPort(ctx context.Context, spec []types.DVPortConfigSpec) (*Task, error) { + req := types.ReconfigureDVPort_Task{ + This: s.Reference(), + Port: spec, + } + + res, err := methods.ReconfigureDVPort_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +func (s DistributedVirtualSwitch) ReconfigureLACP(ctx context.Context, spec []types.VMwareDvsLacpGroupSpec) (*Task, error) { + req := types.UpdateDVSLacpGroupConfig_Task{ + This: s.Reference(), + LacpGroupSpec: spec, + } + + res, err := methods.UpdateDVSLacpGroupConfig_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/folder.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/folder.go index 7a69949f95af..6e0a7649b9da 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/folder.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/folder.go @@ -225,3 +225,17 @@ func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReferenc return NewTask(f.c, res.Returnval), nil } + +func (f Folder) PlaceVmsXCluster(ctx context.Context, spec types.PlaceVmsXClusterSpec) (*types.PlaceVmsXClusterResult, error) { + req := types.PlaceVmsXCluster{ + This: f.Reference(), + PlacementSpec: spec, + } + + res, err := methods.PlaceVmsXCluster(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git 
a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/history_collector.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/history_collector.go deleted file mode 100644 index afdcab78b607..000000000000 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/history_collector.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - - "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" -) - -type HistoryCollector struct { - Common -} - -func NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { - return &HistoryCollector{ - Common: NewCommon(c, ref), - } -} - -func (h HistoryCollector) Destroy(ctx context.Context) error { - req := types.DestroyCollector{ - This: h.Reference(), - } - - _, err := methods.DestroyCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) Reset(ctx context.Context) error { - req := types.ResetCollector{ - This: h.Reference(), - } - - _, err := methods.ResetCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) Rewind(ctx context.Context) error { - req := types.RewindCollector{ - This: h.Reference(), - } - - _, err := methods.RewindCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) SetPageSize(ctx context.Context, maxCount int32) error { - req := types.SetCollectorPageSize{ - This: h.Reference(), - MaxCount: maxCount, - } - - _, err := methods.SetCollectorPageSize(ctx, h.c, &req) - return err -} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_config_manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_config_manager.go index 123227ecc706..eac59a32eb44 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_config_manager.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_config_manager.go @@ -18,9 +18,9 @@ package object import ( "context" + "fmt" "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -34,163 +34,138 @@ func NewHostConfigManager(c *vim25.Client, ref types.ManagedObjectReference) *Ho } } -func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { - var h mo.HostSystem +// reference returns the ManagedObjectReference for the given HostConfigManager property name. +// An error is returned if the field is nil, of type ErrNotSupported if versioned is true. +func (m HostConfigManager) reference(ctx context.Context, name string, versioned ...bool) (types.ManagedObjectReference, error) { + prop := "configManager." 
+ name + var content []types.ObjectContent - err := m.Properties(ctx, m.Reference(), []string{"configManager.datastoreSystem"}, &h) + err := m.Properties(ctx, m.Reference(), []string{prop}, &content) if err != nil { - return nil, err + return types.ManagedObjectReference{}, err } - return NewHostDatastoreSystem(m.c, *h.ConfigManager.DatastoreSystem), nil -} + for _, c := range content { + for _, p := range c.PropSet { + if p.Name != prop { + continue + } + if ref, ok := p.Val.(types.ManagedObjectReference); ok { + return ref, nil + } + } + } -func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { - var h mo.HostSystem + err = fmt.Errorf("%s %s is nil", m.Reference(), prop) + if len(versioned) == 1 && versioned[0] { + err = ErrNotSupported + } + return types.ManagedObjectReference{}, err +} - err := m.Properties(ctx, m.Reference(), []string{"configManager.networkSystem"}, &h) +func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { + ref, err := m.reference(ctx, "datastoreSystem") if err != nil { return nil, err } + return NewHostDatastoreSystem(m.c, ref), nil +} - return NewHostNetworkSystem(m.c, *h.ConfigManager.NetworkSystem), nil +func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { + ref, err := m.reference(ctx, "networkSystem") + if err != nil { + return nil, err + } + return NewHostNetworkSystem(m.c, ref), nil } func (m HostConfigManager) FirewallSystem(ctx context.Context) (*HostFirewallSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.firewallSystem"}, &h) + ref, err := m.reference(ctx, "firewallSystem") if err != nil { return nil, err } - return NewHostFirewallSystem(m.c, *h.ConfigManager.FirewallSystem), nil + return NewHostFirewallSystem(m.c, ref), nil } func (m HostConfigManager) StorageSystem(ctx context.Context) (*HostStorageSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.storageSystem"}, &h) + ref, err := m.reference(ctx, "storageSystem") if err != nil { return nil, err } - - return NewHostStorageSystem(m.c, *h.ConfigManager.StorageSystem), nil + return NewHostStorageSystem(m.c, ref), nil } func (m HostConfigManager) VirtualNicManager(ctx context.Context) (*HostVirtualNicManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.virtualNicManager"}, &h) + ref, err := m.reference(ctx, "virtualNicManager") if err != nil { return nil, err } - - return NewHostVirtualNicManager(m.c, *h.ConfigManager.VirtualNicManager, m.Reference()), nil + return NewHostVirtualNicManager(m.c, ref, m.Reference()), nil } func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanSystem"}, &h) + ref, err := m.reference(ctx, "vsanSystem", true) // Added in 5.5 if err != nil { return nil, err } - - // Added in 5.5 - if h.ConfigManager.VsanSystem == nil { - return nil, ErrNotSupported - } - - return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil + return NewHostVsanSystem(m.c, ref), nil } func (m HostConfigManager) VsanInternalSystem(ctx context.Context) (*HostVsanInternalSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanInternalSystem"}, &h) + ref, err := m.reference(ctx, "vsanInternalSystem", true) // Added in 5.5 if err != nil { return 
nil, err } - - // Added in 5.5 - if h.ConfigManager.VsanInternalSystem == nil { - return nil, ErrNotSupported - } - - return NewHostVsanInternalSystem(m.c, *h.ConfigManager.VsanInternalSystem), nil + return NewHostVsanInternalSystem(m.c, ref), nil } func (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.accountManager"}, &h) + ref, err := m.reference(ctx, "accountManager", true) // Added in 5.5 if err != nil { - return nil, err - } - - ref := h.ConfigManager.AccountManager // Added in 6.0 - if ref == nil { - // Versions < 5.5 can use the ServiceContent ref, - // but we can only use it when connected directly to ESX. - c := m.Client() - if !c.IsVC() { - ref = c.ServiceContent.AccountManager - } - - if ref == nil { - return nil, ErrNotSupported + if err == ErrNotSupported { + // Versions < 5.5 can use the ServiceContent ref, + // but only when connected directly to ESX. + if m.c.ServiceContent.AccountManager == nil { + return nil, err + } + ref = *m.c.ServiceContent.AccountManager + } else { + return nil, err } } - return NewHostAccountManager(m.c, *ref), nil + return NewHostAccountManager(m.c, ref), nil } func (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.advancedOption"}, &h) + ref, err := m.reference(ctx, "advancedOption") if err != nil { return nil, err } - - return NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil + return NewOptionManager(m.c, ref), nil } func (m HostConfigManager) ServiceSystem(ctx context.Context) (*HostServiceSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.serviceSystem"}, &h) + ref, err := m.reference(ctx, "serviceSystem") if err != nil { return nil, err } - - return NewHostServiceSystem(m.c, *h.ConfigManager.ServiceSystem), nil + return NewHostServiceSystem(m.c, ref), nil } func (m HostConfigManager) CertificateManager(ctx context.Context) (*HostCertificateManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.certificateManager"}, &h) + ref, err := m.reference(ctx, "certificateManager", true) // Added in 6.0 if err != nil { return nil, err } - - // Added in 6.0 - if h.ConfigManager.CertificateManager == nil { - return nil, ErrNotSupported - } - - return NewHostCertificateManager(m.c, *h.ConfigManager.CertificateManager, m.Reference()), nil + return NewHostCertificateManager(m.c, ref, m.Reference()), nil } func (m HostConfigManager) DateTimeSystem(ctx context.Context) (*HostDateTimeSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.dateTimeSystem"}, &h) + ref, err := m.reference(ctx, "dateTimeSystem") if err != nil { return nil, err } - - return NewHostDateTimeSystem(m.c, *h.ConfigManager.DateTimeSystem), nil + return NewHostDateTimeSystem(m.c, ref), nil } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_datastore_system.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_datastore_system.go index 7b738e611ed8..64f3add917e9 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_datastore_system.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_datastore_system.go @@ -117,3 +117,19 @@ func (s HostDatastoreSystem) 
QueryVmfsDatastoreCreateOptions(ctx context.Context return res.Returnval, nil } + +func (s HostDatastoreSystem) ResignatureUnresolvedVmfsVolumes(ctx context.Context, devicePaths []string) (*Task, error) { + req := &types.ResignatureUnresolvedVmfsVolume_Task{ + This: s.Reference(), + ResolutionSpec: types.HostUnresolvedVmfsResignatureSpec{ + ExtentDevicePath: devicePaths, + }, + } + + res, err := methods.ResignatureUnresolvedVmfsVolume_Task(ctx, s.Client(), req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_network_system.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_network_system.go index c21e1ec35d57..340b764a5144 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_network_system.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_network_system.go @@ -98,18 +98,18 @@ func (o HostNetworkSystem) AddVirtualSwitch(ctx context.Context, vswitchName str } // QueryNetworkHint wraps methods.QueryNetworkHint -func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) error { +func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) ([]types.PhysicalNicHintInfo, error) { req := types.QueryNetworkHint{ This: o.Reference(), Device: device, } - _, err := methods.QueryNetworkHint(ctx, o.c, &req) + res, err := methods.QueryNetworkHint(ctx, o.c, &req) if err != nil { - return err + return nil, err } - return nil + return res.Returnval, err } // RefreshNetworkSystem wraps methods.RefreshNetworkSystem diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_storage_system.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_storage_system.go index 5990a1d4e6bb..5c9f08eee12a 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_storage_system.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_storage_system.go @@ -172,3 +172,29 @@ func (s HostStorageSystem) AttachScsiLun(ctx context.Context, uuid string) error return err } + +func (s HostStorageSystem) QueryUnresolvedVmfsVolumes(ctx context.Context) ([]types.HostUnresolvedVmfsVolume, error) { + req := &types.QueryUnresolvedVmfsVolume{ + This: s.Reference(), + } + + res, err := methods.QueryUnresolvedVmfsVolume(ctx, s.Client(), req) + if err != nil { + return nil, err + } + return res.Returnval, nil +} + +func (s HostStorageSystem) UnmountVmfsVolume(ctx context.Context, vmfsUuid string) error { + req := &types.UnmountVmfsVolume{ + This: s.Reference(), + VmfsUuid: vmfsUuid, + } + + _, err := methods.UnmountVmfsVolume(ctx, s.Client(), req) + if err != nil { + return err + } + + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_system.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_system.go index a350edfdebd6..ddf6cb8f11ac 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_system.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/host_system.go @@ -21,6 +21,7 @@ import ( "fmt" "net" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" @@ -81,17 +82,17 @@ func (h HostSystem) ManagementIPs(ctx context.Context) ([]net.IP, 
error) { return nil, err } - var ips []net.IP - for _, nc := range mh.Config.VirtualNicManagerInfo.NetConfig { - if nc.NicType == "management" && len(nc.CandidateVnic) > 0 { - ip := net.ParseIP(nc.CandidateVnic[0].Spec.Ip.IpAddress) - if ip != nil { - ips = append(ips, ip) - } - } + config := mh.Config + if config == nil { + return nil, nil } - return ips, nil + info := config.VirtualNicManagerInfo + if info == nil { + return nil, nil + } + + return internal.HostSystemManagementIPs(info.NetConfig), nil } func (h HostSystem) Disconnect(ctx context.Context) (*Task, error) { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network.go index d1dc7ce01f61..f209a7ac4c92 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network.go @@ -20,7 +20,6 @@ import ( "context" "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -34,21 +33,20 @@ func NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network { } } +func (n Network) GetInventoryPath() string { + return n.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network func (n Network) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - var e mo.Network - - // Use Network.Name rather than Common.Name as the latter does not return the complete name if it contains a '/' - // We can't use Common.ObjectName here either as we need the ManagedEntity.Name field is not set since mo.Network - // has its own Name field. - err := n.Properties(ctx, n.Reference(), []string{"name"}, &e) + name, err := n.ObjectName(ctx) if err != nil { return nil, err } backing := &types.VirtualEthernetCardNetworkBackingInfo{ VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ - DeviceName: e.Name, + DeviceName: name, }, } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network_reference.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network_reference.go index 7716bdb38fae..f1a41cd59b66 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network_reference.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/network_reference.go @@ -26,6 +26,6 @@ import ( // which can be used as the backing for a VirtualEthernetCard. 
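// Usage sketch (not part of the vendored sources; the network path "VM Network"
// and the vmxnet3 card type are assumptions): with the NetworkReference
// interface below, a finder result can be turned into a NIC backing regardless
// of the concrete network type. Assumes imports of find, object, vim25 and
// vim25/types.
func exampleEthernetCard(ctx context.Context, c *vim25.Client) (types.BaseVirtualDevice, error) {
	finder := find.NewFinder(c, true)
	dc, err := finder.DefaultDatacenter(ctx)
	if err != nil {
		return nil, err
	}
	finder.SetDatacenter(dc)

	network, err := finder.Network(ctx, "VM Network") // returns an object.NetworkReference
	if err != nil {
		return nil, err
	}
	backing, err := network.EthernetCardBackingInfo(ctx) // DeviceName resolved via ObjectName
	if err != nil {
		return nil, err
	}
	return object.EthernetCardTypes().CreateEthernetCard("vmxnet3", backing)
}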
type NetworkReference interface { Reference - + GetInventoryPath() string EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/opaque_network.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/opaque_network.go index 47ce4cefe622..6797d325de7c 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/opaque_network.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/opaque_network.go @@ -35,19 +35,17 @@ func NewOpaqueNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Opaque } } +func (n OpaqueNetwork) GetInventoryPath() string { + return n.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network func (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - var net mo.OpaqueNetwork - - if err := n.Properties(ctx, n.Reference(), []string{"summary"}, &net); err != nil { + summary, err := n.Summary(ctx) + if err != nil { return nil, err } - summary, ok := net.Summary.(*types.OpaqueNetworkSummary) - if !ok { - return nil, fmt.Errorf("%s unsupported network type: %T", n, net.Summary) - } - backing := &types.VirtualEthernetCardOpaqueNetworkBackingInfo{ OpaqueNetworkId: summary.OpaqueNetworkId, OpaqueNetworkType: summary.OpaqueNetworkType, @@ -55,3 +53,20 @@ func (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseV return backing, nil } + +// Summary returns the mo.OpaqueNetwork.Summary property +func (n OpaqueNetwork) Summary(ctx context.Context) (*types.OpaqueNetworkSummary, error) { + var props mo.OpaqueNetwork + + err := n.Properties(ctx, n.Reference(), []string{"summary"}, &props) + if err != nil { + return nil, err + } + + summary, ok := props.Summary.(*types.OpaqueNetworkSummary) + if !ok { + return nil, fmt.Errorf("%s unsupported network summary type: %T", n, props.Summary) + } + + return summary, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/resource_pool.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/resource_pool.go index 55c2e2b2fdea..e510006b4007 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/resource_pool.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/resource_pool.go @@ -22,6 +22,7 @@ import ( "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -35,6 +36,18 @@ func NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *Resourc } } +// Owner returns the ResourcePool owner as a ClusterComputeResource or ComputeResource. 
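// Usage sketch (assumes an existing *object.VirtualMachine and an import of
// the object package): walking from a VM to the cluster or standalone compute
// resource that owns its resource pool, using the Owner helper added below.
func exampleOwner(ctx context.Context, vm *object.VirtualMachine) (object.Reference, error) {
	pool, err := vm.ResourcePool(ctx)
	if err != nil {
		return nil, err
	}
	// Returns either a *object.ClusterComputeResource or a *object.ComputeResource.
	return pool.Owner(ctx)
}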
+func (p ResourcePool) Owner(ctx context.Context) (Reference, error) { + var pool mo.ResourcePool + + err := p.Properties(ctx, p.Reference(), []string{"owner"}, &pool) + if err != nil { + return nil, err + } + + return NewReference(p.Client(), pool.Owner), nil +} + func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*nfc.Lease, error) { req := types.ImportVApp{ This: p.Reference(), diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/search_index.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/search_index.go index 4b0a525d5288..bcf5e29f2718 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/search_index.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/search_index.go @@ -161,3 +161,88 @@ func (s SearchIndex) FindChild(ctx context.Context, entity Reference, name strin } return NewReference(s.c, *res.Returnval), nil } + +// FindAllByDnsName finds all virtual machines or hosts by DNS name. +func (s SearchIndex) FindAllByDnsName(ctx context.Context, dc *Datacenter, dnsName string, vmSearch bool) ([]Reference, error) { + req := types.FindAllByDnsName{ + This: s.Reference(), + DnsName: dnsName, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByDnsName(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} + +// FindAllByIp finds all virtual machines or hosts by IP address. +func (s SearchIndex) FindAllByIp(ctx context.Context, dc *Datacenter, ip string, vmSearch bool) ([]Reference, error) { + req := types.FindAllByIp{ + This: s.Reference(), + Ip: ip, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByIp(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} + +// FindAllByUuid finds all virtual machines or hosts by UUID. 
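// Usage sketch (the IP value is a placeholder): the FindAll helpers return
// every match instead of only the first one, e.g. all VMs currently reporting
// a given guest IP. Assumes imports of object and vim25.
func exampleFindAllByIP(ctx context.Context, c *vim25.Client) ([]object.Reference, error) {
	idx := object.NewSearchIndex(c)
	// A nil *object.Datacenter searches every datacenter; vmSearch=true limits
	// the result to virtual machines rather than hosts.
	return idx.FindAllByIp(ctx, nil, "10.0.0.42", true)
}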
+func (s SearchIndex) FindAllByUuid(ctx context.Context, dc *Datacenter, uuid string, vmSearch bool, instanceUuid *bool) ([]Reference, error) { + req := types.FindAllByUuid{ + This: s.Reference(), + Uuid: uuid, + VmSearch: vmSearch, + InstanceUuid: instanceUuid, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByUuid(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/task.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/task.go index 2b66aa93b7f7..088dd7f56bed 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/task.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/task.go @@ -48,9 +48,13 @@ func (t *Task) Wait(ctx context.Context) error { return err } -func (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.TaskInfo, error) { +func (t *Task) WaitForResult(ctx context.Context, s ...progress.Sinker) (*types.TaskInfo, error) { + var pr progress.Sinker + if len(s) == 1 { + pr = s[0] + } p := property.DefaultCollector(t.c) - return task.Wait(ctx, t.Reference(), p, s) + return task.Wait(ctx, t.Reference(), p, pr) } func (t *Task) Cancel(ctx context.Context) error { @@ -60,3 +64,37 @@ func (t *Task) Cancel(ctx context.Context) error { return err } + +// SetState sets task state and optionally sets results or fault, as appropriate for state. +func (t *Task) SetState(ctx context.Context, state types.TaskInfoState, result types.AnyType, fault *types.LocalizedMethodFault) error { + req := types.SetTaskState{ + This: t.Reference(), + State: state, + Result: result, + Fault: fault, + } + _, err := methods.SetTaskState(ctx, t.Common.Client(), &req) + return err +} + +// SetDescription updates task description to describe the current phase of the task. +func (t *Task) SetDescription(ctx context.Context, description types.LocalizableMessage) error { + req := types.SetTaskDescription{ + This: t.Reference(), + Description: description, + } + _, err := methods.SetTaskDescription(ctx, t.Common.Client(), &req) + return err +} + +// UpdateProgress Sets percentage done for this task and recalculates overall percentage done. +// If a percentDone value of less than zero or greater than 100 is specified, +// a value of zero or 100 respectively is used. +func (t *Task) UpdateProgress(ctx context.Context, percentDone int) error { + req := types.UpdateProgress{ + This: t.Reference(), + PercentDone: int32(percentDone), + } + _, err := methods.UpdateProgress(ctx, t.Common.Client(), &req) + return err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/tenant_manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/tenant_manager.go new file mode 100644 index 000000000000..4dda196e35ee --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/tenant_manager.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2021 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type TenantManager struct { + Common +} + +func NewTenantManager(c *vim25.Client) *TenantManager { + t := TenantManager{ + Common: NewCommon(c, *c.ServiceContent.TenantManager), + } + + return &t +} + +func (t TenantManager) MarkServiceProviderEntities(ctx context.Context, entities []types.ManagedObjectReference) error { + req := types.MarkServiceProviderEntities{ + This: t.Reference(), + Entity: entities, + } + + _, err := methods.MarkServiceProviderEntities(ctx, t.Client(), &req) + if err != nil { + return err + } + + return nil +} + +func (t TenantManager) UnmarkServiceProviderEntities(ctx context.Context, entities []types.ManagedObjectReference) error { + req := types.UnmarkServiceProviderEntities{ + This: t.Reference(), + Entity: entities, + } + + _, err := methods.UnmarkServiceProviderEntities(ctx, t.Client(), &req) + if err != nil { + return err + } + + return nil +} + +func (t TenantManager) RetrieveServiceProviderEntities(ctx context.Context) ([]types.ManagedObjectReference, error) { + req := types.RetrieveServiceProviderEntities{ + This: t.Reference(), + } + + res, err := methods.RetrieveServiceProviderEntities(ctx, t.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_app.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_app.go index 4811178f167c..b7311b3e39dd 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_app.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_app.go @@ -103,3 +103,19 @@ func (p VirtualApp) Suspend(ctx context.Context) (*Task, error) { return NewTask(p.c, res.Returnval), nil } + +func (p VirtualApp) Clone(ctx context.Context, name string, target types.ManagedObjectReference, spec types.VAppCloneSpec) (*Task, error) { + req := types.CloneVApp_Task{ + This: p.Reference(), + Name: name, + Target: target, + Spec: spec, + } + + res, err := methods.CloneVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_device_list.go index e1c35eff88e3..3765506532e8 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_device_list.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -19,6 +19,7 @@ package object import ( "errors" "fmt" + "math/rand" "path/filepath" "reflect" "regexp" @@ -63,11 +64,12 @@ func EthernetCardTypes() VirtualDeviceList { &types.VirtualE1000e{}, &types.VirtualVmxnet2{}, &types.VirtualVmxnet3{}, + &types.VirtualVmxnet3Vrdma{}, &types.VirtualPCNet32{}, &types.VirtualSriovEthernetCard{}, }).Select(func(device 
types.BaseVirtualDevice) bool { c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() - c.GetVirtualDevice().Key = -1 + c.GetVirtualDevice().Key = VirtualDeviceList{}.newRandomKey() return true }) } @@ -134,6 +136,9 @@ func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBa b := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo) return a.Port.SwitchUuid == b.Port.SwitchUuid && a.Port.PortgroupKey == b.Port.PortgroupKey + case *types.VirtualEthernetCardOpaqueNetworkBackingInfo: + b := backing.(*types.VirtualEthernetCardOpaqueNetworkBackingInfo) + return a.OpaqueNetworkId == b.OpaqueNetworkId case *types.VirtualDiskFlatVer2BackingInfo: b := backing.(*types.VirtualDiskFlatVer2BackingInfo) if a.Parent != nil && b.Parent != nil { @@ -146,6 +151,25 @@ func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBa case types.BaseVirtualDeviceFileBackingInfo: b := backing.(types.BaseVirtualDeviceFileBackingInfo) return a.GetVirtualDeviceFileBackingInfo().FileName == b.GetVirtualDeviceFileBackingInfo().FileName + case *types.VirtualPCIPassthroughVmiopBackingInfo: + b := backing.(*types.VirtualPCIPassthroughVmiopBackingInfo) + return a.Vgpu == b.Vgpu + case *types.VirtualPCIPassthroughDynamicBackingInfo: + b := backing.(*types.VirtualPCIPassthroughDynamicBackingInfo) + if b.CustomLabel != "" && b.CustomLabel != a.CustomLabel { + return false + } + if len(b.AllowedDevice) == 0 { + return true + } + for _, x := range a.AllowedDevice { + for _, y := range b.AllowedDevice { + if x.DeviceId == y.DeviceId && x.VendorId == y.VendorId { + return true + } + } + } + return false default: return false } @@ -229,8 +253,10 @@ func (l VirtualDeviceList) FindSCSIController(name string) (*types.VirtualSCSICo func (l VirtualDeviceList) CreateSCSIController(name string) (types.BaseVirtualDevice, error) { ctypes := SCSIControllerTypes() - if name == "scsi" || name == "" { + if name == "" || name == "scsi" { name = ctypes.Type(ctypes[0]) + } else if name == "virtualscsi" { + name = "pvscsi" // ovf VirtualSCSI mapping } found := ctypes.Select(func(device types.BaseVirtualDevice) bool { @@ -431,10 +457,22 @@ func (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c ty d.UnitNumber = new(int32) *d.UnitNumber = l.newUnitNumber(c) if d.Key == 0 { - d.Key = -1 + d.Key = l.newRandomKey() } } +// newRandomKey returns a random negative device key. +// The generated key can be used for devices you want to add so that it does not collide with existing ones. +func (l VirtualDeviceList) newRandomKey() int32 { + // NOTE: rand.Uint32 cannot be used here because conversion from uint32 to int32 may change the sign + key := rand.Int31() * -1 + if key == 0 { + return -1 + } + + return key +} + // CreateDisk creates a new VirtualDisk device which can be added to a VM. func (l VirtualDeviceList) CreateDisk(c types.BaseVirtualController, ds types.ManagedObjectReference, name string) *types.VirtualDisk { // If name is not specified, one will be chosen for you. 
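// Usage sketch (empty device list for brevity): the OVF controller name
// "virtualscsi" now maps to a pvscsi controller; elsewhere in this change,
// ethernet cards and controller-assigned devices receive a random negative
// key from newRandomKey instead of the fixed -1, avoiding key collisions
// with devices already present on the VM.
func exampleController() (types.BaseVirtualDevice, error) {
	var devices object.VirtualDeviceList
	return devices.CreateSCSIController("virtualscsi") // equivalent to "pvscsi"
}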
@@ -862,6 +900,8 @@ func (l VirtualDeviceList) Type(device types.BaseVirtualDevice) string { return "lsilogic-sas" case *types.VirtualNVMEController: return "nvme" + case *types.VirtualPrecisionClock: + return "clock" default: return l.deviceName(device) } @@ -879,7 +919,13 @@ func (l VirtualDeviceList) Name(device types.BaseVirtualDevice) string { dtype := l.Type(device) switch dtype { case DeviceTypeEthernet: - key = fmt.Sprintf("%d", UnitNumber-7) + // Ethernet devices of UnitNumber 7-19 are non-SRIOV. Ethernet devices of + // UnitNumber 45-36 descending are SRIOV + if UnitNumber <= 45 && UnitNumber >= 36 { + key = fmt.Sprintf("sriov-%d", 45-UnitNumber) + } else { + key = fmt.Sprintf("%d", UnitNumber-7) + } case DeviceTypeDisk: key = fmt.Sprintf("%d-%d", d.ControllerKey, UnitNumber) default: @@ -907,25 +953,9 @@ func (l VirtualDeviceList) ConfigSpec(op types.VirtualDeviceConfigSpecOperation) var res []types.BaseVirtualDeviceConfigSpec for _, device := range l { config := &types.VirtualDeviceConfigSpec{ - Device: device, - Operation: op, - } - - if disk, ok := device.(*types.VirtualDisk); ok { - config.FileOperation = fop - - // Special case to attach an existing disk - if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 { - childDisk := false - if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { - childDisk = b.Parent != nil - } - - if !childDisk { - // Existing disk, clear file operation - config.FileOperation = "" - } - } + Device: device, + Operation: op, + FileOperation: diskFileOperation(op, fop, device), } res = append(res, config) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_machine.go index ba22e554f33c..eeffc19fd3c5 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ import ( "fmt" "net" "path" + "strings" "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/property" @@ -33,12 +34,41 @@ import ( const ( PropRuntimePowerState = "summary.runtime.powerState" + PropConfigTemplate = "summary.config.template" ) type VirtualMachine struct { Common } +// extractDiskLayoutFiles is a helper function used to extract file keys for +// all disk files attached to the virtual machine at the current point of +// running. +func extractDiskLayoutFiles(diskLayoutList []types.VirtualMachineFileLayoutExDiskLayout) []int { + var result []int + + for _, layoutExDisk := range diskLayoutList { + for _, link := range layoutExDisk.Chain { + for i := range link.FileKey { // diskDescriptor, diskExtent pairs + result = append(result, int(link.FileKey[i])) + } + } + } + + return result +} + +// removeKey is a helper function for removing a specific file key from a list +// of keys associated with disks attached to a virtual machine. +func removeKey(l *[]int, key int) { + for i, k := range *l { + if k == key { + *l = append((*l)[:i], (*l)[i+1:]...) 
+ break + } + } +} + func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { return &VirtualMachine{ Common: NewCommon(c, ref), @@ -56,6 +86,17 @@ func (v VirtualMachine) PowerState(ctx context.Context) (types.VirtualMachinePow return o.Summary.Runtime.PowerState, nil } +func (v VirtualMachine) IsTemplate(ctx context.Context) (bool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{PropConfigTemplate}, &o) + if err != nil { + return false, err + } + + return o.Summary.Config.Template, nil +} + func (v VirtualMachine) PowerOn(ctx context.Context) (*Task, error) { req := types.PowerOnVM_Task{ This: v.Reference(), @@ -169,6 +210,20 @@ func (v VirtualMachine) Clone(ctx context.Context, folder *Folder, name string, return NewTask(v.c, res.Returnval), nil } +func (v VirtualMachine) InstantClone(ctx context.Context, config types.VirtualMachineInstantCloneSpec) (*Task, error) { + req := types.InstantClone_Task{ + This: v.Reference(), + Spec: config, + } + + res, err := methods.InstantClone_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + func (v VirtualMachine) Customize(ctx context.Context, spec types.CustomizationSpec) (*Task, error) { req := types.CustomizeVM_Task{ This: v.Reference(), @@ -221,7 +276,9 @@ func (v VirtualMachine) RefreshStorageInfo(ctx context.Context) error { return err } -func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { +// WaitForIP waits for the VM guest.ipAddress property to report an IP address. +// Waits for an IPv4 address if the v4 param is true. +func (v VirtualMachine) WaitForIP(ctx context.Context, v4 ...bool) (string, error) { var ip string p := property.DefaultCollector(v.c) @@ -238,6 +295,11 @@ func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { } ip = c.Val.(string) + if len(v4) == 1 && v4[0] { + if net.ParseIP(ip).To4() == nil { + return false + } + } return true } @@ -272,7 +334,9 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str devices := VirtualDeviceList(c.Val.(types.ArrayOfVirtualDevice).VirtualDevice) for _, d := range devices { if nic, ok := d.(types.BaseVirtualEthernetCard); ok { - mac := nic.GetVirtualEthernetCard().MacAddress + // Convert to lower so that e.g. 00:50:56:83:3A:5D is treated the + // same as 00:50:56:83:3a:5d + mac := strings.ToLower(nic.GetVirtualEthernetCard().MacAddress) if mac == "" { return false } @@ -285,6 +349,10 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str return true }) + if err != nil { + return nil, err + } + if len(device) != 0 { // Only wait for specific NIC(s) macs = make(map[string][]string) @@ -304,7 +372,9 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str nics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo for _, nic := range nics { - mac := nic.MacAddress + // Convert to lower so that e.g. 
00:50:56:83:3A:5D is treated the + // same as 00:50:56:83:3a:5d + mac := strings.ToLower(nic.MacAddress) if mac == "" || nic.IpConfig == nil { continue } @@ -391,29 +461,33 @@ func (v VirtualMachine) ResourcePool(ctx context.Context) (*ResourcePool, error) return NewResourcePool(v.c, *rp), nil } -func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error { - spec := types.VirtualMachineConfigSpec{} +func diskFileOperation(op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, device types.BaseVirtualDevice) types.VirtualDeviceConfigSpecFileOperation { + if disk, ok := device.(*types.VirtualDisk); ok { + // Special case to attach an existing disk + if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 && disk.CapacityInBytes == 0 { + childDisk := false + if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + childDisk = b.Parent != nil + } - for _, device := range devices { - config := &types.VirtualDeviceConfigSpec{ - Device: device, - Operation: op, + if !childDisk { + fop = "" // existing disk + } } + return fop + } - if disk, ok := device.(*types.VirtualDisk); ok { - config.FileOperation = fop + return "" +} - // Special case to attach an existing disk - if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 { - childDisk := false - if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { - childDisk = b.Parent != nil - } +func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error { + spec := types.VirtualMachineConfigSpec{} - if !childDisk { - config.FileOperation = "" // existing disk - } - } + for _, device := range devices { + config := &types.VirtualDeviceConfigSpec{ + Device: device, + Operation: op, + FileOperation: diskFileOperation(op, fop, device), } spec.DeviceChange = append(spec.DeviceChange, config) @@ -446,6 +520,41 @@ func (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...) } +// AttachDisk attaches the given disk to the VirtualMachine +func (v VirtualMachine) AttachDisk(ctx context.Context, id string, datastore *Datastore, controllerKey int32, unitNumber int32) error { + req := types.AttachDisk_Task{ + This: v.Reference(), + DiskId: types.ID{Id: id}, + Datastore: datastore.Reference(), + ControllerKey: controllerKey, + UnitNumber: &unitNumber, + } + + res, err := methods.AttachDisk_Task(ctx, v.c, &req) + if err != nil { + return err + } + + task := NewTask(v.c, res.Returnval) + return task.Wait(ctx) +} + +// DetachDisk detaches the given disk from the VirtualMachine +func (v VirtualMachine) DetachDisk(ctx context.Context, id string) error { + req := types.DetachDisk_Task{ + This: v.Reference(), + DiskId: types.ID{Id: id}, + } + + res, err := methods.DetachDisk_Task(ctx, v.c, &req) + if err != nil { + return err + } + + task := NewTask(v.c, res.Returnval) + return task.Wait(ctx) +} + // BootOptions returns the VirtualMachine's config.bootOptions property. 
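// Usage sketch (disk ID, controller key and unit number are placeholders):
// attaching a first class disk (FCD) to a VM and detaching it again with the
// new AttachDisk/DetachDisk helpers, which wait on the underlying tasks.
func exampleAttachDetach(ctx context.Context, vm *object.VirtualMachine, ds *object.Datastore) error {
	const diskID = "11111111-2222-3333-4444-555555555555" // placeholder FCD ID
	if err := vm.AttachDisk(ctx, diskID, ds, 1000, 1); err != nil {
		return err
	}
	return vm.DetachDisk(ctx, diskID)
}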
func (v VirtualMachine) BootOptions(ctx context.Context) (*types.VirtualMachineBootOptions, error) { var o mo.VirtualMachine @@ -556,6 +665,63 @@ func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) } } +// SnapshotSize calculates the size of a given snapshot in bytes. If the +// snapshot is current, disk files not associated with any parent snapshot are +// included in size calculations. This allows for measuring and including the +// growth from the last fixed snapshot to the present state. +func SnapshotSize(info types.ManagedObjectReference, parent *types.ManagedObjectReference, vmlayout *types.VirtualMachineFileLayoutEx, isCurrent bool) int { + var fileKeyList []int + var parentFiles []int + var allSnapshotFiles []int + + diskFiles := extractDiskLayoutFiles(vmlayout.Disk) + + for _, layout := range vmlayout.Snapshot { + diskLayout := extractDiskLayoutFiles(layout.Disk) + allSnapshotFiles = append(allSnapshotFiles, diskLayout...) + + if layout.Key.Value == info.Value { + fileKeyList = append(fileKeyList, int(layout.DataKey)) // The .vmsn file + fileKeyList = append(fileKeyList, diskLayout...) // The .vmdk files + } else if parent != nil && layout.Key.Value == parent.Value { + parentFiles = append(parentFiles, diskLayout...) + } + } + + for _, parentFile := range parentFiles { + removeKey(&fileKeyList, parentFile) + } + + for _, file := range allSnapshotFiles { + removeKey(&diskFiles, file) + } + + fileKeyMap := make(map[int]types.VirtualMachineFileLayoutExFileInfo) + for _, file := range vmlayout.File { + fileKeyMap[int(file.Key)] = file + } + + size := 0 + + for _, fileKey := range fileKeyList { + file := fileKeyMap[fileKey] + if parent != nil || + (file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskDescriptor) && + file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskExtent)) { + size += int(file.Size) + } + } + + if isCurrent { + for _, diskFile := range diskFiles { + file := fileKeyMap[diskFile] + size += int(file.Size) + } + } + + return size +} + // FindSnapshot supports snapshot lookup by name, where name can be: // 1) snapshot ManagedObjectReference.Value (unique) // 2) snapshot name (may not be unique) @@ -569,7 +735,7 @@ func (v VirtualMachine) FindSnapshot(ctx context.Context, name string) (*types.M } if o.Snapshot == nil || len(o.Snapshot.RootSnapshotList) == 0 { - return nil, errors.New("No snapshots for this VM") + return nil, errors.New("no snapshots for this VM") } m := make(snapshotMap) @@ -832,6 +998,85 @@ func (v VirtualMachine) UUID(ctx context.Context) string { if err != nil { return "" } + if o.Config != nil { + return o.Config.Uuid + } + return "" +} - return o.Config.Uuid +func (v VirtualMachine) QueryChangedDiskAreas(ctx context.Context, baseSnapshot, curSnapshot *types.ManagedObjectReference, disk *types.VirtualDisk, offset int64) (types.DiskChangeInfo, error) { + var noChange types.DiskChangeInfo + var err error + + if offset > disk.CapacityInBytes { + return noChange, fmt.Errorf("offset is greater than the disk size (%#x and %#x)", offset, disk.CapacityInBytes) + } else if offset == disk.CapacityInBytes { + return types.DiskChangeInfo{StartOffset: offset, Length: 0}, nil + } + + var b mo.VirtualMachineSnapshot + err = v.Properties(ctx, baseSnapshot.Reference(), []string{"config.hardware"}, &b) + if err != nil { + return noChange, fmt.Errorf("failed to fetch config.hardware of snapshot %s: %s", baseSnapshot, err) + } + + var changeId *string + for _, vd := range b.Config.Hardware.Device { + d := 
vd.GetVirtualDevice() + if d.Key != disk.Key { + continue + } + + // As per VDDK programming guide, these are the four types of disks + // that support CBT, see "Gathering Changed Block Information". + if b, ok := d.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskRawDiskMappingVer1BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskRawDiskVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + + return noChange, fmt.Errorf("disk %d has backing info without .ChangeId: %t", disk.Key, d.Backing) + } + if changeId == nil || *changeId == "" { + return noChange, fmt.Errorf("CBT is not enabled on disk %d", disk.Key) + } + + req := types.QueryChangedDiskAreas{ + This: v.Reference(), + Snapshot: curSnapshot, + DeviceKey: disk.Key, + StartOffset: offset, + ChangeId: *changeId, + } + + res, err := methods.QueryChangedDiskAreas(ctx, v.Client(), &req) + if err != nil { + return noChange, err + } + + return res.Returnval, nil +} + +// ExportSnapshot exports all VMDK-files up to (but not including) a specified snapshot. This +// is useful when exporting a running VM. +func (v *VirtualMachine) ExportSnapshot(ctx context.Context, snapshot *types.ManagedObjectReference) (*nfc.Lease, error) { + req := types.ExportSnapshot{ + This: *snapshot, + } + resp, err := methods.ExportSnapshot(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + return nfc.NewLease(v.c, resp.Returnval), nil } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go index f6caf9870837..8eccba19b1e5 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go @@ -19,3 +19,7 @@ package object type VmwareDistributedVirtualSwitch struct { DistributedVirtualSwitch } + +func (s VmwareDistributedVirtualSwitch) GetInventoryPath() string { + return s.InventoryPath +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/client.go index 3e799ebfb616..ba2531ec966c 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/client.go @@ -43,6 +43,8 @@ type Client struct { *soap.Client ServiceContent types.PbmServiceInstanceContent + + RoundTripper soap.RoundTripper } func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { @@ -57,7 +59,12 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { return nil, err } - return &Client{sc, res.Returnval}, nil + return &Client{sc, res.Returnval, sc}, nil +} + +// RoundTrip dispatches to the RoundTripper field. 
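// Usage sketch (assumes a logged-in *vim25.Client and an import of the pbm
// package): the PBM client is layered on the vSphere SOAP client, and with the
// RoundTripper field and RoundTrip method below it can be decorated (e.g. for
// tracing) the same way as the vim25 client.
func examplePbmClient(ctx context.Context, c *vim25.Client) (*pbm.Client, error) {
	return pbm.NewClient(ctx, c)
}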
+func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } func (c *Client) QueryProfile(ctx context.Context, rtype types.PbmProfileResourceType, category string) ([]types.PbmProfileId, error) { @@ -224,3 +231,57 @@ func (c *Client) ProfileIDByName(ctx context.Context, profileName string) (strin } return "", fmt.Errorf("no pbm profile found with name: %q", profileName) } + +func (c *Client) FetchCapabilityMetadata(ctx context.Context, rtype *types.PbmProfileResourceType, vendorUuid string) ([]types.PbmCapabilityMetadataPerCategory, error) { + req := types.PbmFetchCapabilityMetadata{ + This: c.ServiceContent.ProfileManager, + ResourceType: rtype, + VendorUuid: vendorUuid, + } + + res, err := methods.PbmFetchCapabilityMetadata(ctx, c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (c *Client) FetchComplianceResult(ctx context.Context, entities []types.PbmServerObjectRef) ([]types.PbmComplianceResult, error) { + req := types.PbmFetchComplianceResult{ + This: c.ServiceContent.ComplianceManager, + Entities: entities, + } + + res, err := methods.PbmFetchComplianceResult(ctx, c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// GetProfileNameByID gets storage profile name by ID +func (c *Client) GetProfileNameByID(ctx context.Context, profileID string) (string, error) { + resourceType := types.PbmProfileResourceType{ + ResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE), + } + category := types.PbmProfileCategoryEnumREQUIREMENT + ids, err := c.QueryProfile(ctx, resourceType, string(category)) + if err != nil { + return "", err + } + + profiles, err := c.RetrieveContent(ctx, ids) + if err != nil { + return "", err + } + + for i := range profiles { + profile := profiles[i].GetPbmProfile() + if profile.ProfileId.UniqueId == profileID { + return profile.Name, nil + } + } + return "", fmt.Errorf("no pbm profile found with id: %q", profileID) +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/methods/methods.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/methods/methods.go index 1776773061bf..fa7f2b200ffc 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/methods/methods.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/pbm_util.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/pbm_util.go index 4cc8085f3446..d773b8dbb776 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/pbm_util.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/pbm_util.go @@ -27,6 +27,7 @@ import ( // A struct to capture pbm create spec details. 
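// Usage sketch (the profile ID is a placeholder): resolving a storage policy
// name from its ID with the GetProfileNameByID helper added above; it queries
// all REQUIREMENT profiles and matches on ProfileId.UniqueId.
func exampleProfileName(ctx context.Context, pc *pbm.Client) (string, error) {
	return pc.GetProfileNameByID(ctx, "aa6d5a82-1c88-45da-85d3-3d74b91a5bad")
}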
type CapabilityProfileCreateSpec struct { Name string + SubProfileName string Description string Category string CapabilityList []Capability @@ -64,6 +65,7 @@ func CreateCapabilityProfileSpec(pbmCreateSpec CapabilityProfileCreateSpec) (*ty SubProfiles: []types.PbmCapabilitySubProfile{ types.PbmCapabilitySubProfile{ Capability: capabilities, + Name: pbmCreateSpec.SubProfileName, }, }, }, diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/enum.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/enum.go index ddd1a5f78617..66751bb1f4da 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/enum.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,18 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +type PbmAssociateAndApplyPolicyStatusPolicyStatus string + +const ( + PbmAssociateAndApplyPolicyStatusPolicyStatusSuccess = PbmAssociateAndApplyPolicyStatusPolicyStatus("success") + PbmAssociateAndApplyPolicyStatusPolicyStatusFailed = PbmAssociateAndApplyPolicyStatusPolicyStatus("failed") + PbmAssociateAndApplyPolicyStatusPolicyStatusInvalid = PbmAssociateAndApplyPolicyStatusPolicyStatus("invalid") +) + +func init() { + types.Add("pbm:PbmAssociateAndApplyPolicyStatusPolicyStatus", reflect.TypeOf((*PbmAssociateAndApplyPolicyStatusPolicyStatus)(nil)).Elem()) +} + type PbmBuiltinGenericType string const ( @@ -104,6 +116,30 @@ func init() { types.Add("pbm:PbmComplianceStatus", reflect.TypeOf((*PbmComplianceStatus)(nil)).Elem()) } +type PbmDebugManagerKeystoreName string + +const ( + PbmDebugManagerKeystoreNameSMS = PbmDebugManagerKeystoreName("SMS") + PbmDebugManagerKeystoreNameTRUSTED_ROOTS = PbmDebugManagerKeystoreName("TRUSTED_ROOTS") +) + +func init() { + types.Add("pbm:PbmDebugManagerKeystoreName", reflect.TypeOf((*PbmDebugManagerKeystoreName)(nil)).Elem()) +} + +type PbmHealthStatusForEntity string + +const ( + PbmHealthStatusForEntityRed = PbmHealthStatusForEntity("red") + PbmHealthStatusForEntityYellow = PbmHealthStatusForEntity("yellow") + PbmHealthStatusForEntityGreen = PbmHealthStatusForEntity("green") + PbmHealthStatusForEntityUnknown = PbmHealthStatusForEntity("unknown") +) + +func init() { + types.Add("pbm:PbmHealthStatusForEntity", reflect.TypeOf((*PbmHealthStatusForEntity)(nil)).Elem()) +} + type PbmIofilterInfoFilterType string const ( @@ -131,12 +167,42 @@ const ( PbmLineOfServiceInfoLineOfServiceEnumPERSISTENCE = PbmLineOfServiceInfoLineOfServiceEnum("PERSISTENCE") PbmLineOfServiceInfoLineOfServiceEnumDATA_PROVIDER = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROVIDER") PbmLineOfServiceInfoLineOfServiceEnumDATASTORE_IO_CONTROL = PbmLineOfServiceInfoLineOfServiceEnum("DATASTORE_IO_CONTROL") + PbmLineOfServiceInfoLineOfServiceEnumDATA_PROTECTION = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROTECTION") ) func init() { types.Add("pbm:PbmLineOfServiceInfoLineOfServiceEnum", reflect.TypeOf((*PbmLineOfServiceInfoLineOfServiceEnum)(nil)).Elem()) } +type PbmLoggingConfigurationComponent string + +const ( + PbmLoggingConfigurationComponentPbm = PbmLoggingConfigurationComponent("pbm") + PbmLoggingConfigurationComponentVslm = PbmLoggingConfigurationComponent("vslm") + 
PbmLoggingConfigurationComponentSms = PbmLoggingConfigurationComponent("sms") + PbmLoggingConfigurationComponentSpbm = PbmLoggingConfigurationComponent("spbm") + PbmLoggingConfigurationComponentSps = PbmLoggingConfigurationComponent("sps") + PbmLoggingConfigurationComponentHttpclient_header = PbmLoggingConfigurationComponent("httpclient_header") + PbmLoggingConfigurationComponentHttpclient_content = PbmLoggingConfigurationComponent("httpclient_content") + PbmLoggingConfigurationComponentVmomi = PbmLoggingConfigurationComponent("vmomi") +) + +func init() { + types.Add("pbm:PbmLoggingConfigurationComponent", reflect.TypeOf((*PbmLoggingConfigurationComponent)(nil)).Elem()) +} + +type PbmLoggingConfigurationLogLevel string + +const ( + PbmLoggingConfigurationLogLevelINFO = PbmLoggingConfigurationLogLevel("INFO") + PbmLoggingConfigurationLogLevelDEBUG = PbmLoggingConfigurationLogLevel("DEBUG") + PbmLoggingConfigurationLogLevelTRACE = PbmLoggingConfigurationLogLevel("TRACE") +) + +func init() { + types.Add("pbm:PbmLoggingConfigurationLogLevel", reflect.TypeOf((*PbmLoggingConfigurationLogLevel)(nil)).Elem()) +} + type PbmObjectType string const ( @@ -145,6 +211,8 @@ const ( PbmObjectTypeVirtualDiskId = PbmObjectType("virtualDiskId") PbmObjectTypeVirtualDiskUUID = PbmObjectType("virtualDiskUUID") PbmObjectTypeDatastore = PbmObjectType("datastore") + PbmObjectTypeVsanObjectId = PbmObjectType("vsanObjectId") + PbmObjectTypeFileShareId = PbmObjectType("fileShareId") PbmObjectTypeUnknown = PbmObjectType("unknown") ) @@ -166,6 +234,18 @@ func init() { types.Add("pbm:PbmOperation", reflect.TypeOf((*PbmOperation)(nil)).Elem()) } +type PbmPolicyAssociationVolumeAllocationType string + +const ( + PbmPolicyAssociationVolumeAllocationTypeFullyInitialized = PbmPolicyAssociationVolumeAllocationType("FullyInitialized") + PbmPolicyAssociationVolumeAllocationTypeReserveSpace = PbmPolicyAssociationVolumeAllocationType("ReserveSpace") + PbmPolicyAssociationVolumeAllocationTypeConserveSpaceWhenPossible = PbmPolicyAssociationVolumeAllocationType("ConserveSpaceWhenPossible") +) + +func init() { + types.Add("pbm:PbmPolicyAssociationVolumeAllocationType", reflect.TypeOf((*PbmPolicyAssociationVolumeAllocationType)(nil)).Elem()) +} + type PbmProfileCategoryEnum string const ( @@ -191,9 +271,10 @@ func init() { type PbmSystemCreatedProfileType string const ( - PbmSystemCreatedProfileTypeVsanDefaultProfile = PbmSystemCreatedProfileType("VsanDefaultProfile") - PbmSystemCreatedProfileTypeVVolDefaultProfile = PbmSystemCreatedProfileType("VVolDefaultProfile") - PbmSystemCreatedProfileTypePmemDefaultProfile = PbmSystemCreatedProfileType("PmemDefaultProfile") + PbmSystemCreatedProfileTypeVsanDefaultProfile = PbmSystemCreatedProfileType("VsanDefaultProfile") + PbmSystemCreatedProfileTypeVVolDefaultProfile = PbmSystemCreatedProfileType("VVolDefaultProfile") + PbmSystemCreatedProfileTypePmemDefaultProfile = PbmSystemCreatedProfileType("PmemDefaultProfile") + PbmSystemCreatedProfileTypeVsanMaxDefaultProfile = PbmSystemCreatedProfileType("VsanMaxDefaultProfile") ) func init() { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/if.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/if.go index 832df5e1d52e..a740a25dabee 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/if.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. 
+Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/types.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/types.go index 015482d08552..1687df447db1 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/types.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/pbm/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -909,6 +909,17 @@ func init() { types.Add("pbm:PbmFaultInvalidLoginFault", reflect.TypeOf((*PbmFaultInvalidLoginFault)(nil)).Elem()) } +type PbmFaultNoPermissionEntityPrivileges struct { + types.DynamicData + + ProfileId *PbmProfileId `xml:"profileId,omitempty"` + PrivilegeIds []string `xml:"privilegeIds,omitempty"` +} + +func init() { + types.Add("pbm:PbmFaultNoPermissionEntityPrivileges", reflect.TypeOf((*PbmFaultNoPermissionEntityPrivileges)(nil)).Elem()) +} + type PbmFaultNotFound struct { PbmFault } @@ -997,6 +1008,17 @@ type PbmFetchComplianceResultResponse struct { Returnval []PbmComplianceResult `xml:"returnval,omitempty"` } +type PbmFetchEntityHealthStatusSpec struct { + types.DynamicData + + ObjectRef PbmServerObjectRef `xml:"objectRef"` + BackingId string `xml:"backingId,omitempty"` +} + +func init() { + types.Add("pbm:PbmFetchEntityHealthStatusSpec", reflect.TypeOf((*PbmFetchEntityHealthStatusSpec)(nil)).Elem()) +} + type PbmFetchResourceType PbmFetchResourceTypeRequestType func init() { @@ -1114,6 +1136,17 @@ func init() { types.Add("pbm:PbmLineOfServiceInfo", reflect.TypeOf((*PbmLineOfServiceInfo)(nil)).Elem()) } +type PbmLoggingConfiguration struct { + types.DynamicData + + Component string `xml:"component"` + LogLevel string `xml:"logLevel"` +} + +func init() { + types.Add("pbm:PbmLoggingConfiguration", reflect.TypeOf((*PbmLoggingConfiguration)(nil)).Elem()) +} + type PbmNonExistentHubs struct { PbmFault diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/collector.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/collector.go index b77e60061156..8798ceacbf17 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/collector.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/collector.go @@ -97,10 +97,16 @@ func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) er return nil } -func (p *Collector) WaitForUpdates(ctx context.Context, v string) (*types.UpdateSet, error) { +func (p *Collector) WaitForUpdates(ctx context.Context, version string, opts ...*types.WaitOptions) (*types.UpdateSet, error) { req := types.WaitForUpdatesEx{ This: p.Reference(), - Version: v, + Version: version, + } + + if len(opts) == 1 { + req.Options = opts[0] + } else if len(opts) > 1 { + panic("only one option may be specified") } res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req) @@ -143,7 +149,7 @@ func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe spec := types.PropertySpec{ Type: obj.Type, } - if ps == nil { + if len(ps) == 0 { spec.All = types.NewBool(true) } else { spec.PathSet = ps @@ -179,7 +185,7 @@ 
func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe return nil } - return mo.LoadRetrievePropertiesResponse(res, dst) + return mo.LoadObjectContent(res.Returnval, dst) } // RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter. diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/filter.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/filter.go index a4bf16d0555d..2287bbfd96da 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/filter.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/filter.go @@ -42,6 +42,9 @@ func (f Filter) Keys() []string { // MatchProperty returns true if a Filter entry matches the given prop. func (f Filter) MatchProperty(prop types.DynamicProperty) bool { + if prop.Val == nil { + return false + } match, ok := f[prop.Name] if !ok { return false @@ -74,7 +77,7 @@ func (f Filter) MatchProperty(prop types.DynamicProperty) bool { } // convert if we can - switch prop.Val.(type) { + switch val := prop.Val.(type) { case bool: match, _ = strconv.ParseBool(s) case int16: @@ -91,7 +94,9 @@ func (f Filter) MatchProperty(prop types.DynamicProperty) bool { case float64: match, _ = strconv.ParseFloat(s, 64) case fmt.Stringer: - prop.Val = prop.Val.(fmt.Stringer).String() + prop.Val = val.String() + case *types.CustomFieldStringValue: + prop.Val = fmt.Sprintf("%d:%s", val.Key, val.Value) default: if ptype.Kind() != reflect.String { return false @@ -125,7 +130,7 @@ func (f Filter) MatchPropertyList(props []types.DynamicProperty) bool { return len(f) == len(props) // false if a property such as VM "guest" is unset } -// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches the Filter. +// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches all properties the Filter. func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference { var refs []types.ManagedObjectReference @@ -137,3 +142,27 @@ func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.Manage return refs } + +// MatchAnyPropertyList returns true if any given props match the Filter. +func (f Filter) MatchAnyPropertyList(props []types.DynamicProperty) bool { + for _, p := range props { + if f.MatchProperty(p) { + return true + } + } + + return false +} + +// MatchAnyObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches any property in the Filter. 
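// Illustrative usage sketch, not part of this vendored patch: the match-any
// helpers introduced here select objects where at least one filter entry
// matches, whereas MatchObjectContent requires every entry to match. The
// property names and values below are assumptions made only for this example.
package example

import (
	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25/types"
)

// poweredOnOrTest returns references whose PropSet matches either filter entry.
func poweredOnOrTest(objects []types.ObjectContent) []types.ManagedObjectReference {
	f := property.Filter{
		"name":               "test-vm",
		"runtime.powerState": "poweredOn",
	}
	return f.MatchAnyObjectContent(objects)
}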
+func (f Filter) MatchAnyObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference { + var refs []types.ManagedObjectReference + + for _, o := range objects { + if f.MatchAnyPropertyList(o.PropSet) { + refs = append(refs, o.Obj) + } + } + + return refs +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/wait.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/wait.go index f730525ca701..fbb680771195 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/wait.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/property/wait.go @@ -20,13 +20,16 @@ import ( "context" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) // WaitFilter provides helpers to construct a types.CreateFilter for use with property.Wait type WaitFilter struct { types.CreateFilter - Options *types.WaitOptions + Options *types.WaitOptions + PropagateMissing bool + Truncated bool } // Add a new ObjectSpec and PropertySpec to the WaitFilter @@ -81,6 +84,8 @@ func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, p // The newly created collector is destroyed before this function returns (both // in case of success or error). // +// By default, ObjectUpdate.MissingSet faults are not propagated to the returned error, +// set WaitFilter.PropagateMissing=true to enable MissingSet fault propagation. func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f func([]types.ObjectUpdate) bool) error { p, err := c.Create(ctx) if err != nil { @@ -89,7 +94,9 @@ func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f fun // Attempt to destroy the collector using the background context, as the // specified context may have timed out or have been canceled. - defer p.Destroy(context.Background()) + defer func() { + _ = p.Destroy(context.Background()) + }() err = p.CreateFilter(ctx, filter.CreateFilter) if err != nil { @@ -121,8 +128,21 @@ func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f fun } req.Version = set.Version + filter.Truncated = false + if set.Truncated != nil { + filter.Truncated = *set.Truncated + } for _, fs := range set.FilterSet { + if filter.PropagateMissing { + for i := range fs.ObjectSet { + for _, p := range fs.ObjectSet[i].MissingSet { + // Same behavior as mo.ObjectContentToType() + return soap.WrapVimFault(p.Fault.Fault) + } + } + } + if f(fs.ObjectSet) { return nil } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keep_alive.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keep_alive.go index 3b44f5ffb614..7c1bebf91349 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keep_alive.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keep_alive.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2020 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,110 +17,24 @@ limitations under the License. 
package session import ( - "context" - "sync" "time" - "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/session/keepalive" "github.com/vmware/govmomi/vim25/soap" ) -type keepAlive struct { - sync.Mutex - - roundTripper soap.RoundTripper - idleTime time.Duration - notifyRequest chan struct{} - notifyStop chan struct{} - notifyWaitGroup sync.WaitGroup - - // keepAlive executes a request in the background with the purpose of - // keeping the session active. The response for this request is discarded. - keepAlive func(soap.RoundTripper) error -} - -func defaultKeepAlive(roundTripper soap.RoundTripper) error { - _, _ = methods.GetCurrentTime(context.Background(), roundTripper) - return nil -} - -// KeepAlive wraps the specified soap.RoundTripper and executes a meaningless -// API request in the background after the RoundTripper has been idle for the -// specified amount of idle time. The keep alive process only starts once a -// user logs in and runs until the user logs out again. +// KeepAlive is a backward compatible wrapper around KeepAliveHandler. func KeepAlive(roundTripper soap.RoundTripper, idleTime time.Duration) soap.RoundTripper { - return KeepAliveHandler(roundTripper, idleTime, defaultKeepAlive) + return KeepAliveHandler(roundTripper, idleTime, nil) } -// KeepAliveHandler works as KeepAlive() does, but the handler param can decide how to handle errors. -// For example, if connectivity to ESX/VC is down long enough for a session to expire, a handler can choose to -// Login() on a types.NotAuthenticated error. If handler returns non-nil, the keep alive go routine will be stopped. +// KeepAliveHandler is a backward compatible wrapper around keepalive.NewHandlerSOAP. func KeepAliveHandler(roundTripper soap.RoundTripper, idleTime time.Duration, handler func(soap.RoundTripper) error) soap.RoundTripper { - k := &keepAlive{ - roundTripper: roundTripper, - idleTime: idleTime, - notifyRequest: make(chan struct{}), - } - - k.keepAlive = handler - - return k -} - -func (k *keepAlive) start() { - k.Lock() - defer k.Unlock() - - if k.notifyStop != nil { - return - } - - // This channel must be closed to terminate idle timer. - k.notifyStop = make(chan struct{}) - k.notifyWaitGroup.Add(1) - - go func() { - defer k.notifyWaitGroup.Done() - - for t := time.NewTimer(k.idleTime); ; { - select { - case <-k.notifyStop: - return - case <-k.notifyRequest: - t.Reset(k.idleTime) - case <-t.C: - if err := k.keepAlive(k.roundTripper); err != nil { - k.stop() - } - t = time.NewTimer(k.idleTime) - } + var f func() error + if handler != nil { + f = func() error { + return handler(roundTripper) } - }() -} - -func (k *keepAlive) stop() { - k.Lock() - defer k.Unlock() - - if k.notifyStop != nil { - close(k.notifyStop) - k.notifyWaitGroup.Wait() - k.notifyStop = nil - } -} - -func (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error { - err := k.roundTripper.RoundTrip(ctx, req, res) - if err != nil { - return err - } - // Start ticker on login, stop ticker on logout. 
- switch req.(type) { - case *methods.LoginBody, *methods.LoginExtensionByCertificateBody, *methods.LoginByTokenBody: - k.start() - case *methods.LogoutBody: - k.stop() } - - return nil + return keepalive.NewHandlerSOAP(roundTripper, idleTime, f) } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keepalive/handler.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keepalive/handler.go new file mode 100644 index 000000000000..3ebf046a53bc --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/keepalive/handler.go @@ -0,0 +1,204 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package keepalive + +import ( + "context" + "errors" + "net/http" + "sync" + "time" + + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" +) + +// handler contains the generic keep alive settings and logic +type handler struct { + mu sync.Mutex + notifyStop chan struct{} + notifyWaitGroup sync.WaitGroup + + idle time.Duration + send func() error +} + +// NewHandlerSOAP returns a soap.RoundTripper for use with a vim25.Client +// The idle time specifies the interval in between send() requests. Defaults to 10 minutes. +// The send func is used to keep a session alive. Defaults to calling vim25 GetCurrentTime(). +// The keep alive goroutine starts when a Login method is called and runs until Logout is called or send returns an error. +func NewHandlerSOAP(c soap.RoundTripper, idle time.Duration, send func() error) *HandlerSOAP { + h := &handler{ + idle: idle, + send: send, + } + + if send == nil { + h.send = func() error { + return h.keepAliveSOAP(c) + } + } + + return &HandlerSOAP{h, c} +} + +// NewHandlerREST returns an http.RoundTripper for use with a rest.Client +// The idle time specifies the interval in between send() requests. Defaults to 10 minutes. +// The send func is used to keep a session alive. Defaults to calling the rest.Client.Session() method +// The keep alive goroutine starts when a Login method is called and runs until Logout is called or send returns an error. +func NewHandlerREST(c *rest.Client, idle time.Duration, send func() error) *HandlerREST { + h := &handler{ + idle: idle, + send: send, + } + + if send == nil { + h.send = func() error { + return h.keepAliveREST(c) + } + } + + return &HandlerREST{h, c.Transport} +} + +func (h *handler) keepAliveSOAP(rt soap.RoundTripper) error { + ctx := context.Background() + _, err := methods.GetCurrentTime(ctx, rt) + return err +} + +func (h *handler) keepAliveREST(c *rest.Client) error { + ctx := context.Background() + + s, err := c.Session(ctx) + if err != nil { + return err + } + if s != nil { + return nil + } + return errors.New(http.StatusText(http.StatusUnauthorized)) +} + +// Start explicitly starts the keep alive go routine. +// For use with session cache.Client, as cached sessions may not involve Login/Logout via RoundTripper. 
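// Illustrative usage sketch, not part of this vendored patch: installing the
// SOAP keep-alive handler on a vim25.Client so an idle session is refreshed
// with the default GetCurrentTime probe. The 5 minute interval and the helper
// name are assumptions made only for this example.
package example

import (
	"time"

	"github.com/vmware/govmomi/session/keepalive"
	"github.com/vmware/govmomi/vim25"
)

// withKeepAlive wraps the client's round tripper; passing send == nil selects
// the default keep-alive request described above.
func withKeepAlive(c *vim25.Client) {
	c.RoundTripper = keepalive.NewHandlerSOAP(c.RoundTripper, 5*time.Minute, nil)
}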
+func (h *handler) Start() { + h.mu.Lock() + defer h.mu.Unlock() + + if h.notifyStop != nil { + return + } + + if h.idle == 0 { + h.idle = time.Minute * 10 + } + + // This channel must be closed to terminate idle timer. + h.notifyStop = make(chan struct{}) + h.notifyWaitGroup.Add(1) + + go func() { + for t := time.NewTimer(h.idle); ; { + select { + case <-h.notifyStop: + h.notifyWaitGroup.Done() + t.Stop() + return + case <-t.C: + if err := h.send(); err != nil { + h.notifyWaitGroup.Done() + h.Stop() + return + } + t.Reset(h.idle) + } + } + }() +} + +// Stop explicitly stops the keep alive go routine. +// For use with session cache.Client, as cached sessions may not involve Login/Logout via RoundTripper. +func (h *handler) Stop() { + h.mu.Lock() + defer h.mu.Unlock() + + if h.notifyStop != nil { + close(h.notifyStop) + h.notifyWaitGroup.Wait() + h.notifyStop = nil + } +} + +// HandlerSOAP is a keep alive implementation for use with vim25.Client +type HandlerSOAP struct { + *handler + + roundTripper soap.RoundTripper +} + +// RoundTrip implements soap.RoundTripper +func (h *HandlerSOAP) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + // Stop ticker on logout. + switch req.(type) { + case *methods.LogoutBody: + h.Stop() + } + + err := h.roundTripper.RoundTrip(ctx, req, res) + if err != nil { + return err + } + + // Start ticker on login. + switch req.(type) { + case *methods.LoginBody, *methods.LoginExtensionByCertificateBody, *methods.LoginByTokenBody: + h.Start() + } + + return nil +} + +// HandlerREST is a keep alive implementation for use with rest.Client +type HandlerREST struct { + *handler + + roundTripper http.RoundTripper +} + +// RoundTrip implements http.RoundTripper +func (h *HandlerREST) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL.Path != "/rest/com/vmware/cis/session" { + return h.roundTripper.RoundTrip(req) + } + + if req.Method == http.MethodDelete { // Logout + h.Stop() + } + + res, err := h.roundTripper.RoundTrip(req) + if err != nil { + return res, err + } + + if req.Method == http.MethodPost { // Login + h.Start() + } + + return res, err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/manager.go index ad3219716a48..8689acd504a4 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/manager.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/session/manager.go @@ -18,9 +18,10 @@ package session import ( "context" - "net/http" + "io/ioutil" "net/url" "os" + "strings" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" @@ -41,6 +42,21 @@ func init() { } } +// Secret returns the contents if a file path value is given, otherwise returns value itself. +func Secret(value string) (string, error) { + if len(value) == 0 { + return value, nil + } + contents, err := ioutil.ReadFile(value) + if err != nil { + if os.IsPermission(err) { + return "", err + } + return value, nil + } + return strings.TrimSpace(string(contents)), nil +} + type Manager struct { client *vim25.Client userSession *types.UserSession @@ -107,7 +123,7 @@ func (sm *Manager) LoginExtensionByCertificate(ctx context.Context, key string) // "Post https://sdkTunnel:8089/sdk: x509: certificate is valid for $vcenter_hostname, not sdkTunnel" // The only easy way around this is to disable verification for the call to LoginExtensionByCertificate(). 
// TODO: find a way to avoid disabling InsecureSkipVerify. - c.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true + c.DefaultTransport().TLSClientConfig.InsecureSkipVerify = true } req := types.LoginExtensionByCertificate{ @@ -265,3 +281,14 @@ func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { sm.userSession = &res.Returnval return nil } + +func (sm *Manager) UpdateServiceMessage(ctx context.Context, message string) error { + req := types.UpdateServiceMessage{ + This: sm.Reference(), + Message: message, + } + + _, err := methods.UpdateServiceMessage(ctx, sm.client, &req) + + return err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/client.go index 70e02de09398..d98c560954a5 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/client.go @@ -38,6 +38,8 @@ const ( // Client is a soap.Client targeting the STS (Secure Token Service) API endpoint. type Client struct { *soap.Client + + RoundTripper soap.RoundTripper } // NewClient returns a client targeting the STS API endpoint. @@ -48,7 +50,7 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { filter := &types.LookupServiceRegistrationFilter{ ServiceType: &types.LookupServiceRegistrationServiceType{ Product: "com.vmware.cis", - Type: "sso:sts", + Type: "cs.identity", }, EndpointType: &types.LookupServiceRegistrationEndpointType{ Protocol: "wsTrust", @@ -59,11 +61,19 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { url := lookup.EndpointURL(ctx, c, Path, filter) sc := c.Client.NewServiceClient(url, Namespace) - return &Client{sc}, nil + return &Client{sc, sc}, nil +} + +// RoundTrip dispatches to the RoundTripper field. +func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } // TokenRequest parameters for issuing a SAML token. // At least one of Userinfo or Certificate must be specified. +// When `TokenRequest.Certificate` is set, the `tls.Certificate.PrivateKey` field must be set as it is required to sign the request. +// When the `tls.Certificate.Certificate` field is not set, the request Assertion header is set to that of the TokenRequest.Token. +// Otherwise `tls.Certificate.Certificate` is used as the BinarySecurityToken in the request. 
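// Illustrative usage sketch, not part of this vendored patch: issuing a Bearer
// token via the STS client. The Issue method belongs to the existing sts
// package rather than this diff, and the function name and credential wiring
// are assumptions made only for this example.
package example

import (
	"context"
	"net/url"

	"github.com/vmware/govmomi/sts"
	"github.com/vmware/govmomi/vim25"
)

// bearerToken requests a SAML Bearer token for the given credentials; setting
// Certificate instead would request a Holder-of-Key token as described above.
func bearerToken(ctx context.Context, vc *vim25.Client, user *url.Userinfo) (*sts.Signer, error) {
	c, err := sts.NewClient(ctx, vc)
	if err != nil {
		return nil, err
	}
	return c.Issue(ctx, sts.TokenRequest{Userinfo: user})
}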
type TokenRequest struct { Userinfo *url.Userinfo // Userinfo when set issues a Bearer token Certificate *tls.Certificate // Certificate when set issues a HoK token diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/internal/types.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/internal/types.go index dc8402b018f4..875dbe50538c 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/internal/types.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/internal/types.go @@ -473,12 +473,12 @@ func (a *AttributeStatement) C14N() string { type AttributeValue struct { XMLName xml.Name - Type string `xml:"type,attr"` + Type string `xml:"type,attr,typeattr"` Value string `xml:",innerxml"` } func (a *AttributeValue) C14N() string { - return fmt.Sprintf(`%s`, XSI, a.Value) + return fmt.Sprintf(`%s`, XSI, a.Type, a.Value) } type Attribute struct { @@ -682,7 +682,7 @@ func Marshal(val interface{}) string { // Note that the namespace is required when encoding, but the namespace prefix must not be // present when decoding as Go's decoding does not handle namespace prefix. func mkns(ns string, obj interface{}, name ...*xml.Name) string { - ns = ns + ":" + ns += ":" for i := range name { name[i].Space = "" if !strings.HasPrefix(name[i].Local, ns) { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/signer.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/signer.go index 6a3b042bce8f..6ee0ffdebd03 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/signer.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/sts/signer.go @@ -30,12 +30,14 @@ import ( "io" "io/ioutil" mrand "math/rand" + "net" "net/http" "net/url" "strings" "time" "github.com/google/uuid" + "github.com/vmware/govmomi/sts/internal" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" @@ -126,22 +128,32 @@ func (s *Signer) Sign(env soap.Envelope) ([]byte, error) { req := x.RequestSecurityToken() c14n = req.C14N() body = req.String() - id := newID() - - info.SecurityTokenReference = &internal.SecurityTokenReference{ - Reference: &internal.SecurityReference{ - URI: "#" + id, - ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", - }, - } - header.BinarySecurityToken = &internal.BinarySecurityToken{ - EncodingType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary", - ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", - ID: id, - Value: base64.StdEncoding.EncodeToString(s.Certificate.Certificate[0]), + if len(s.Certificate.Certificate) == 0 { + header.Assertion = s.Token + if err := s.setTokenReference(&info); err != nil { + return nil, err + } + } else { + id := newID() + + header.BinarySecurityToken = &internal.BinarySecurityToken{ + EncodingType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary", + ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", + ID: id, + Value: base64.StdEncoding.EncodeToString(s.Certificate.Certificate[0]), + } + + info.SecurityTokenReference = &internal.SecurityTokenReference{ + Reference: &internal.SecurityReference{ + URI: "#" + id, + ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", + }, + } } - } else { 
+ } + // When requesting HoK token for interactive user, request will have both priv. key and username/password. + if s.user.Username() != "" { header.UsernameToken = &internal.UsernameToken{ Username: s.user.Username(), } @@ -263,7 +275,7 @@ func (s *Signer) SignRequest(req *http.Request) error { return fmt.Errorf("sts: reading http.Request body: %s", rerr) } } - bhash := sha256.New().Sum(body) + bhash := sha256.Sum256(body) port := req.URL.Port() if port == "" { @@ -271,18 +283,25 @@ func (s *Signer) SignRequest(req *http.Request) error { } var buf bytes.Buffer + host := req.URL.Hostname() + + // Check if the host IP is in IPv6 format. If yes, add the opening and closing square brackets. + if isIPv6(host) { + host = fmt.Sprintf("%s%s%s", "[", host, "]") + } + msg := []string{ nonce, req.Method, req.URL.Path, - strings.ToLower(req.URL.Hostname()), + strings.ToLower(host), port, } for i := range msg { buf.WriteString(msg[i]) buf.WriteByte('\n') } - buf.Write(bhash) + buf.Write(bhash[:]) buf.WriteByte('\n') sum := sha256.Sum256(buf.Bytes()) @@ -309,7 +328,7 @@ func (s *Signer) SignRequest(req *http.Request) error { }) add(param{ key: "bodyhash", - val: base64.StdEncoding.EncodeToString(bhash), + val: base64.StdEncoding.EncodeToString(bhash[:]), }) } @@ -326,3 +345,11 @@ func (s *Signer) NewRequest() TokenRequest { KeyID: s.keyID, } } + +func isIPv6(s string) bool { + ip := net.ParseIP(s) + if ip == nil { + return false + } + return ip.To4() == nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/error.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/error.go index 5f6b8503f5bc..3fff5aa26582 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/error.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/error.go @@ -20,6 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" type Error struct { *types.LocalizedMethodFault + Description *types.LocalizableMessage } // Error returns the task's localized fault message. diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/history_collector.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/history_collector.go new file mode 100644 index 000000000000..7d8a6cf04ab5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/history_collector.go @@ -0,0 +1,89 @@ +/* +Copyright (c) 2015-2022 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import ( + "context" + + "github.com/vmware/govmomi/history" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// HistoryCollector provides a mechanism for retrieving historical data and +// updates when the server appends new tasks. 
+type HistoryCollector struct { + *history.Collector +} + +func newHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { + return &HistoryCollector{ + Collector: history.NewCollector(c, ref), + } +} + +// LatestPage returns items in the 'viewable latest page' of the task history collector. +// As new tasks that match the collector's TaskFilterSpec are created, +// they are added to this page, and the oldest tasks are removed from the collector to keep +// the size of the page to that allowed by SetCollectorPageSize. +// The "oldest task" is the one with the oldest creation time. The tasks in the returned page are unordered. +func (h HistoryCollector) LatestPage(ctx context.Context) ([]types.TaskInfo, error) { + var o mo.TaskHistoryCollector + + err := h.Properties(ctx, h.Reference(), []string{"latestPage"}, &o) + if err != nil { + return nil, err + } + + return o.LatestPage, nil +} + +// ReadNextTasks reads the scrollable view from the current position. The +// scrollable position is moved to the next newer page after the read. No item +// is returned when the end of the collector is reached. +func (h HistoryCollector) ReadNextTasks(ctx context.Context, maxCount int32) ([]types.TaskInfo, error) { + req := types.ReadNextTasks{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadNextTasks(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// ReadPreviousTasks reads the scrollable view from the current position. The +// scrollable position is then moved to the next older page after the read. No +// item is returned when the head of the collector is reached. +func (h HistoryCollector) ReadPreviousTasks(ctx context.Context, maxCount int32) ([]types.TaskInfo, error) { + req := types.ReadPreviousTasks{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadPreviousTasks(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/manager.go new file mode 100644 index 000000000000..8279089d7dba --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/manager.go @@ -0,0 +1,61 @@ +/* +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package task + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Manager struct { + r types.ManagedObjectReference + c *vim25.Client +} + +// NewManager creates a new task manager +func NewManager(c *vim25.Client) *Manager { + m := Manager{ + r: *c.ServiceContent.TaskManager, + c: c, + } + + return &m +} + +// Reference returns the task.Manager MOID +func (m Manager) Reference() types.ManagedObjectReference { + return m.r +} + +// CreateCollectorForTasks returns a task history collector, a specialized +// history collector that gathers TaskInfo data objects. +func (m Manager) CreateCollectorForTasks(ctx context.Context, filter types.TaskFilterSpec) (*HistoryCollector, error) { + req := types.CreateCollectorForTasks{ + This: m.r, + Filter: filter, + } + + res, err := methods.CreateCollectorForTasks(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return newHistoryCollector(m.c, res.Returnval), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/wait.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/wait.go index 19fee5384636..b78f5110d959 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/wait.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/task/wait.go @@ -38,7 +38,7 @@ func (t taskProgress) Detail() string { func (t taskProgress) Error() error { if t.info.Error != nil { - return Error{t.info.Error} + return Error{t.info.Error, t.info.Description} } return nil @@ -68,7 +68,7 @@ func (t *taskCallback) fn(pc []types.PropertyChange) bool { t.info = &ti } - // t.info could be nil if pc can't satify the rules above + // t.info could be nil if pc can't satisfy the rules above if t.info == nil { return false } @@ -123,7 +123,18 @@ func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Co defer close(cb.ch) } - err := property.Wait(ctx, pc, ref, []string{"info"}, cb.fn) + filter := &property.WaitFilter{PropagateMissing: true} + filter.Add(ref, ref.Type, []string{"info"}) + + err := property.WaitForUpdates(ctx, pc, filter, func(updates []types.ObjectUpdate) bool { + for _, update := range updates { + if cb.fn(update.ChangeSet) { + return true + } + } + + return false + }) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/internal/internal.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/internal/internal.go index 781299947238..f6584c569bf2 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/internal/internal.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/internal/internal.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,23 +17,36 @@ limitations under the License. 
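// Illustrative usage sketch, not part of this vendored patch: paging through
// recent tasks with the new task manager and history collector. The empty
// filter, page size and function name are assumptions made only for this
// example; Destroy comes from the embedded history.Collector.
package example

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/task"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

// dumpRecentTasks prints the state of up to ten tasks from the scrollable view.
func dumpRecentTasks(ctx context.Context, vc *vim25.Client) error {
	m := task.NewManager(vc)

	c, err := m.CreateCollectorForTasks(ctx, types.TaskFilterSpec{})
	if err != nil {
		return err
	}
	defer c.Destroy(ctx)

	page, err := c.ReadNextTasks(ctx, 10)
	if err != nil {
		return err
	}
	for _, info := range page {
		fmt.Println(info.DescriptionId, info.State)
	}
	return nil
}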
package internal import ( - "bytes" - "encoding/json" - "io" - "net/http" - "net/url" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) +// VAPI REST Paths const ( - Path = "/rest/com/vmware" - SessionPath = "/cis/session" - CategoryPath = "/cis/tagging/category" - TagPath = "/cis/tagging/tag" - AssociationPath = "/cis/tagging/tag-association" - SessionCookieName = "vmware-api-session-id" + SessionPath = "/com/vmware/cis/session" + CategoryPath = "/com/vmware/cis/tagging/category" + TagPath = "/com/vmware/cis/tagging/tag" + AssociationPath = "/com/vmware/cis/tagging/tag-association" + LibraryPath = "/com/vmware/content/library" + LibraryItemFileData = "/com/vmware/cis/data" + LibraryItemPath = "/com/vmware/content/library/item" + LibraryItemFilePath = "/com/vmware/content/library/item/file" + LibraryItemUpdateSession = "/com/vmware/content/library/item/update-session" + LibraryItemUpdateSessionFile = "/com/vmware/content/library/item/updatesession/file" + LibraryItemDownloadSession = "/com/vmware/content/library/item/download-session" + LibraryItemDownloadSessionFile = "/com/vmware/content/library/item/downloadsession/file" + LocalLibraryPath = "/com/vmware/content/local-library" + SubscribedLibraryPath = "/com/vmware/content/subscribed-library" + SecurityPoliciesPath = "/api/content/security-policies" + SubscribedLibraryItem = "/com/vmware/content/library/subscribed-item" + Subscriptions = "/com/vmware/content/library/subscriptions" + TrustedCertificatesPath = "/api/content/trusted-certificates" + VCenterOVFLibraryItem = "/com/vmware/vcenter/ovf/library-item" + VCenterVMTXLibraryItem = "/vcenter/vm-template/library-items" + VCenterVM = "/vcenter/vm" + SessionCookieName = "vmware-api-session-id" + UseHeaderAuthn = "vmware-use-header-authn" + DebugEcho = "/vc-sim/debug/echo" ) // AssociatedObject is the same structure as types.ManagedObjectReference, @@ -62,63 +75,15 @@ func NewAssociation(ref mo.Reference) Association { } } -type CloneURL interface { - URL() *url.URL -} - -// Resource wraps url.URL with helpers -type Resource struct { - u *url.URL -} - -func URL(c CloneURL, path string) *Resource { - r := &Resource{u: c.URL()} - r.u.Path = Path + path - return r -} - -// WithID appends id to the URL.Path -func (r *Resource) WithID(id string) *Resource { - r.u.Path += "/id:" + id - return r -} - -// WithAction sets adds action to the URL.RawQuery -func (r *Resource) WithAction(action string) *Resource { - r.u.RawQuery = url.Values{ - "~action": []string{action}, - }.Encode() - return r -} - -// Request returns a new http.Request for the given method. -// An optional body can be provided for POST and PATCH methods. -func (r *Resource) Request(method string, body ...interface{}) *http.Request { - rdr := io.MultiReader() // empty body by default - if len(body) != 0 { - rdr = encode(body[0]) - } - req, err := http.NewRequest(method, r.u.String(), rdr) - if err != nil { - panic(err) - } - return req -} - -type errorReader struct { - e error +type SubscriptionDestination struct { + ID string `json:"subscription"` } -func (e errorReader) Read([]byte) (int, error) { - return -1, e.e +type SubscriptionDestinationSpec struct { + Subscriptions []SubscriptionDestination `json:"subscriptions,omitempty"` } -// encode body as JSON, deferring any errors until io.Reader is used. 
-func encode(body interface{}) io.Reader { - var b bytes.Buffer - err := json.NewEncoder(&b).Encode(body) - if err != nil { - return errorReader{err} - } - return &b +type SubscriptionItemDestinationSpec struct { + Force bool `json:"force_sync_content"` + Subscriptions []SubscriptionDestination `json:"subscriptions,omitempty"` } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/client.go index 152928ee2207..b1cefd0e2801 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/client.go @@ -25,6 +25,9 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" + "sync" + "time" "github.com/vmware/govmomi/vapi/internal" "github.com/vmware/govmomi/vim25" @@ -33,14 +36,93 @@ import ( // Client extends soap.Client to support JSON encoding, while inheriting security features, debug tracing and session persistence. type Client struct { + mu sync.Mutex + *soap.Client + sessionID string +} + +// Session information +type Session struct { + User string `json:"user"` + Created time.Time `json:"created_time"` + LastAccessed time.Time `json:"last_accessed_time"` +} + +// LocalizableMessage represents a localizable error +type LocalizableMessage struct { + Args []string `json:"args,omitempty"` + DefaultMessage string `json:"default_message,omitempty"` + ID string `json:"id,omitempty"` +} + +func (m *LocalizableMessage) Error() string { + return m.DefaultMessage } // NewClient creates a new Client instance. func NewClient(c *vim25.Client) *Client { - sc := c.Client.NewServiceClient(internal.Path, "") + sc := c.Client.NewServiceClient(Path, "") + + return &Client{Client: sc} +} + +// SessionID is set by calling Login() or optionally with the given id param +func (c *Client) SessionID(id ...string) string { + c.mu.Lock() + defer c.mu.Unlock() + if len(id) != 0 { + c.sessionID = id[0] + } + return c.sessionID +} + +type marshaledClient struct { + SoapClient *soap.Client + SessionID string +} + +func (c *Client) MarshalJSON() ([]byte, error) { + m := marshaledClient{ + SoapClient: c.Client, + SessionID: c.sessionID, + } + + return json.Marshal(m) +} + +func (c *Client) UnmarshalJSON(b []byte) error { + var m marshaledClient - return &Client{sc} + err := json.Unmarshal(b, &m) + if err != nil { + return err + } + + *c = Client{ + Client: m.SoapClient, + sessionID: m.SessionID, + } + + return nil +} + +// isAPI returns true if path starts with "/api" +// This hack allows helpers to support both endpoints: +// "/rest" - value wrapped responses and structured error responses +// "/api" - raw responses and no structured error responses +func isAPI(path string) bool { + return strings.HasPrefix(path, "/api") +} + +// Resource helper for the given path. +func (c *Client) Resource(path string) *Resource { + r := &Resource{u: c.URL()} + if !isAPI(path) { + path = Path + path + } + r.u.Path = path + return r } type Signer interface { @@ -53,24 +135,64 @@ func (c *Client) WithSigner(ctx context.Context, s Signer) context.Context { return context.WithValue(ctx, signerContext{}, s) } +type headersContext struct{} + +// WithHeader returns a new Context populated with the provided headers map. +// Calls to a VAPI REST client with this context will populate the HTTP headers +// map using the provided headers. 
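// Illustrative usage sketch, not part of this vendored patch: logging in to
// the vAPI REST endpoint with the updated client, which now records a session
// ID for use on later requests. The function name and credential wiring are
// assumptions made only for this example.
package example

import (
	"context"
	"net/url"

	"github.com/vmware/govmomi/vapi/rest"
	"github.com/vmware/govmomi/vim25"
)

// restLogin creates a REST client on top of an existing SOAP client and
// authenticates it with basic credentials.
func restLogin(ctx context.Context, vc *vim25.Client, user *url.Userinfo) (*rest.Client, error) {
	c := rest.NewClient(vc)
	if err := c.Login(ctx, user); err != nil {
		return nil, err
	}
	return c, nil
}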
+func (c *Client) WithHeader( + ctx context.Context, + headers http.Header) context.Context { + + return context.WithValue(ctx, headersContext{}, headers) +} + +type statusError struct { + res *http.Response +} + +func (e *statusError) Error() string { + return fmt.Sprintf("%s %s: %s", e.res.Request.Method, e.res.Request.URL, e.res.Status) +} + +// RawResponse may be used with the Do method as the resBody argument in order +// to capture the raw response data. +type RawResponse struct { + bytes.Buffer +} + // Do sends the http.Request, decoding resBody if provided. func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) error { switch req.Method { - case http.MethodPost, http.MethodPatch: + case http.MethodPost, http.MethodPatch, http.MethodPut: req.Header.Set("Content-Type", "application/json") } req.Header.Set("Accept", "application/json") + if id := c.SessionID(); id != "" { + req.Header.Set(internal.SessionCookieName, id) + } + if s, ok := ctx.Value(signerContext{}).(Signer); ok { if err := s.SignRequest(req); err != nil { return err } } + if headers, ok := ctx.Value(headersContext{}).(http.Header); ok { + for k, v := range headers { + for _, v := range v { + req.Header.Add(k, v) + } + } + } + return c.Client.Do(ctx, req, func(res *http.Response) error { switch res.StatusCode { case http.StatusOK: + case http.StatusCreated: + case http.StatusNoContent: case http.StatusBadRequest: // TODO: structured error types detail, err := ioutil.ReadAll(res.Body) @@ -79,7 +201,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) } return fmt.Errorf("%s: %s", res.Status, bytes.TrimSpace(detail)) default: - return fmt.Errorf("%s %s: %s", req.Method, req.URL, res.Status) + return &statusError{res} } if resBody == nil { @@ -87,23 +209,68 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) } switch b := resBody.(type) { + case *RawResponse: + return res.Write(b) case io.Writer: _, err := io.Copy(b, res.Body) return err default: + d := json.NewDecoder(res.Body) + if isAPI(req.URL.Path) { + // Responses from the /api endpoint are not wrapped + return d.Decode(resBody) + } + // Responses from the /rest endpoint are wrapped in this structure val := struct { Value interface{} `json:"value,omitempty"` }{ resBody, } - return json.NewDecoder(res.Body).Decode(&val) + return d.Decode(&val) } }) } +// authHeaders ensures the given map contains a REST auth header +func (c *Client) authHeaders(h map[string]string) map[string]string { + if _, exists := h[internal.SessionCookieName]; exists { + return h + } + if h == nil { + h = make(map[string]string) + } + + h[internal.SessionCookieName] = c.SessionID() + + return h +} + +// Download wraps soap.Client.Download, adding the REST authentication header +func (c *Client) Download(ctx context.Context, u *url.URL, param *soap.Download) (io.ReadCloser, int64, error) { + p := *param + p.Headers = c.authHeaders(p.Headers) + return c.Client.Download(ctx, u, &p) +} + +// DownloadFile wraps soap.Client.DownloadFile, adding the REST authentication header +func (c *Client) DownloadFile(ctx context.Context, file string, u *url.URL, param *soap.Download) error { + p := *param + p.Headers = c.authHeaders(p.Headers) + return c.Client.DownloadFile(ctx, file, u, &p) +} + +// Upload wraps soap.Client.Upload, adding the REST authentication header +func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *soap.Upload) error { + p := *param + p.Headers = c.authHeaders(p.Headers) + 
return c.Client.Upload(ctx, f, u, &p) +} + // Login creates a new session via Basic Authentication with the given url.Userinfo. func (c *Client) Login(ctx context.Context, user *url.Userinfo) error { - req := internal.URL(c, internal.SessionPath).Request(http.MethodPost) + req := c.Resource(internal.SessionPath).Request(http.MethodPost) + + req.Header.Set(internal.UseHeaderAuthn, "true") if user != nil { if password, ok := user.Password(); ok { @@ -111,15 +278,59 @@ func (c *Client) Login(ctx context.Context, user *url.Userinfo) error { } } - return c.Do(ctx, req, nil) + var id string + err := c.Do(ctx, req, &id) + if err != nil { + return err + } + + c.SessionID(id) + + return nil } func (c *Client) LoginByToken(ctx context.Context) error { return c.Login(ctx, nil) } +// Session returns the user's current session. +// Nil is returned if the session is not authenticated. +func (c *Client) Session(ctx context.Context) (*Session, error) { + var s Session + req := c.Resource(internal.SessionPath).WithAction("get").Request(http.MethodPost) + err := c.Do(ctx, req, &s) + if err != nil { + if e, ok := err.(*statusError); ok { + if e.res.StatusCode == http.StatusUnauthorized { + return nil, nil + } + } + return nil, err + } + return &s, nil +} + // Logout deletes the current session. func (c *Client) Logout(ctx context.Context) error { - req := internal.URL(c, internal.SessionPath).Request(http.MethodDelete) + req := c.Resource(internal.SessionPath).Request(http.MethodDelete) return c.Do(ctx, req, nil) } + +// Valid returns whether or not the client is valid and ready for use. +// This should be called after unmarshalling the client. +func (c *Client) Valid() bool { + if c == nil { + return false + } + + if c.Client == nil { + return false + } + + return true +} + +// Path returns rest.Path (see cache.Client) +func (c *Client) Path() string { + return Path +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/resource.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/resource.go new file mode 100644 index 000000000000..e6e627492ab4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/rest/resource.go @@ -0,0 +1,114 @@ +/* +Copyright (c) 2019 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" +) + +const ( + Path = "/rest" +) + +// Resource wraps url.URL with helpers +type Resource struct { + u *url.URL +} + +func (r *Resource) String() string { + return r.u.String() +} + +// WithID appends id to the URL.Path +func (r *Resource) WithID(id string) *Resource { + r.u.Path += "/id:" + id + return r +} + +// WithAction sets adds action to the URL.RawQuery +func (r *Resource) WithAction(action string) *Resource { + return r.WithParam("~action", action) +} + +// WithParam adds one parameter on the URL.RawQuery +func (r *Resource) WithParam(name string, value string) *Resource { + // ParseQuery handles empty case, and we control access to query string so shouldn't encounter an error case + params, err := url.ParseQuery(r.u.RawQuery) + if err != nil { + panic(err) + } + params[name] = append(params[name], value) + r.u.RawQuery = params.Encode() + return r +} + +// WithPathEncodedParam appends a parameter on the URL.RawQuery, +// For special cases where URL Path-style encoding is needed +func (r *Resource) WithPathEncodedParam(name string, value string) *Resource { + t := &url.URL{Path: value} + encodedValue := t.String() + t = &url.URL{Path: name} + encodedName := t.String() + // ParseQuery handles empty case, and we control access to query string so shouldn't encounter an error case + params, err := url.ParseQuery(r.u.RawQuery) + if err != nil { + panic(err) + } + // Values.Encode() doesn't escape exactly how we want, so we need to build the query string ourselves + if len(params) >= 1 { + r.u.RawQuery = r.u.RawQuery + "&" + encodedName + "=" + encodedValue + } else { + r.u.RawQuery = r.u.RawQuery + encodedName + "=" + encodedValue + } + return r +} + +// Request returns a new http.Request for the given method. +// An optional body can be provided for POST and PATCH methods. +func (r *Resource) Request(method string, body ...interface{}) *http.Request { + rdr := io.MultiReader() // empty body by default + if len(body) != 0 { + rdr = encode(body[0]) + } + req, err := http.NewRequest(method, r.u.String(), rdr) + if err != nil { + panic(err) + } + return req +} + +type errorReader struct { + e error +} + +func (e errorReader) Read([]byte) (int, error) { + return -1, e.e +} + +// encode body as JSON, deferring any errors until io.Reader is used. +func encode(body interface{}) io.Reader { + var b bytes.Buffer + err := json.NewEncoder(&b).Encode(body) + if err != nil { + return errorReader{err} + } + return &b +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/categories.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/categories.go index 119b96506218..64d0b66c16c4 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/categories.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/categories.go @@ -20,11 +20,13 @@ import ( "context" "fmt" "net/http" + "strings" "github.com/vmware/govmomi/vapi/internal" ) -// Category provides methods to create, read, update, delete, and enumerate categories. +// Category provides methods to create, read, update, delete, and enumerate +// categories. 
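// Illustrative usage sketch, not part of this vendored patch: enumerating tag
// categories with the tagging manager. tags.NewManager belongs to the existing
// package rather than this diff, and the function name is an assumption made
// only for this example.
package example

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vapi/rest"
	"github.com/vmware/govmomi/vapi/tags"
)

// listCategories prints every category; GetCategories below now skips
// categories deleted between the ID listing and the per-ID fetch.
func listCategories(ctx context.Context, rc *rest.Client) error {
	m := tags.NewManager(rc)
	categories, err := m.GetCategories(ctx)
	if err != nil {
		return err
	}
	for _, c := range categories {
		fmt.Println(c.ID, c.Name)
	}
	return nil
}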
type Category struct { ID string `json:"id,omitempty"` Name string `json:"name,omitempty"` @@ -87,12 +89,13 @@ func (c *Manager) CreateCategory(ctx context.Context, category *Category) (strin // otherwise create fails with invalid_argument spec.Category.AssociableTypes = []string{} } - url := internal.URL(c, internal.CategoryPath) + url := c.Resource(internal.CategoryPath) var res string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } -// UpdateCategory can update one or more of the AssociableTypes, Cardinality, Description and Name fields. +// UpdateCategory updates one or more of the AssociableTypes, Cardinality, +// Description and Name fields. func (c *Manager) UpdateCategory(ctx context.Context, category *Category) error { spec := struct { Category Category `json:"update_spec"` @@ -104,13 +107,13 @@ func (c *Manager) UpdateCategory(ctx context.Context, category *Category) error Name: category.Name, }, } - url := internal.URL(c, internal.CategoryPath).WithID(category.ID) + url := c.Resource(internal.CategoryPath).WithID(category.ID) return c.Do(ctx, url.Request(http.MethodPatch, spec), nil) } -// DeleteCategory deletes an existing category. +// DeleteCategory deletes a category. func (c *Manager) DeleteCategory(ctx context.Context, category *Category) error { - url := internal.URL(c, internal.CategoryPath).WithID(category.ID) + url := c.Resource(internal.CategoryPath).WithID(category.ID) return c.Do(ctx, url.Request(http.MethodDelete), nil) } @@ -129,19 +132,19 @@ func (c *Manager) GetCategory(ctx context.Context, id string) (*Category, error) } } } - url := internal.URL(c, internal.CategoryPath).WithID(id) + url := c.Resource(internal.CategoryPath).WithID(id) var res Category return &res, c.Do(ctx, url.Request(http.MethodGet), &res) } // ListCategories returns all category IDs in the system. func (c *Manager) ListCategories(ctx context.Context) ([]string, error) { - url := internal.URL(c, internal.CategoryPath) + url := c.Resource(internal.CategoryPath) var res []string return res, c.Do(ctx, url.Request(http.MethodGet), &res) } -// GetCategories fetches an array of category information in the system. +// GetCategories fetches a list of category information in the system. func (c *Manager) GetCategories(ctx context.Context) ([]Category, error) { ids, err := c.ListCategories(ctx) if err != nil { @@ -152,11 +155,13 @@ func (c *Manager) GetCategories(ctx context.Context) ([]Category, error) { for _, id := range ids { category, err := c.GetCategory(ctx, id) if err != nil { - return nil, fmt.Errorf("get category %s: %s", id, err) + if strings.Contains(err.Error(), http.StatusText(http.StatusNotFound)) { + continue // deleted since last fetch + } + return nil, fmt.Errorf("get category %s: %v", id, err) } - categories = append(categories, *category) - } + return categories, nil } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/errors.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/errors.go new file mode 100644 index 000000000000..b3f84f842f4f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/errors.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tags + +import ( + "fmt" +) + +const ( + errFormat = "[error: %d type: %s reason: %s]" + separator = "," // concat multiple error strings +) + +// BatchError is an error returned for a single item which failed in a batch +// operation +type BatchError struct { + Type string `json:"id"` + Message string `json:"default_message"` +} + +// BatchErrors contains all errors which occurred in a batch operation +type BatchErrors []BatchError + +func (b BatchErrors) Error() string { + if len(b) == 0 { + return "" + } + + var errString string + for i := range b { + errType := b[i].Type + reason := b[i].Message + errString += fmt.Sprintf(errFormat, i, errType, reason) + + // no separator after last item + if i+1 < len(b) { + errString += separator + } + } + return errString +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go index 1e1434186f08..33b220936617 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go @@ -18,6 +18,7 @@ package tags import ( "context" + "encoding/json" "fmt" "net/http" @@ -43,7 +44,7 @@ func (c *Manager) AttachTag(ctx context.Context, tagID string, ref mo.Reference) return err } spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("attach") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("attach") return c.Do(ctx, url.Request(http.MethodPost, spec), nil) } @@ -55,14 +56,146 @@ func (c *Manager) DetachTag(ctx context.Context, tagID string, ref mo.Reference) return err } spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("detach") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("detach") return c.Do(ctx, url.Request(http.MethodPost, spec), nil) } +// batchResponse is the response type used by attach/detach operations which +// take multiple tagIDs or moRefs as input. On failure Success will be false and +// Errors contains information about all failed operations +type batchResponse struct { + Success bool `json:"success"` + Errors BatchErrors `json:"error_messages,omitempty"` +} + +// AttachTagToMultipleObjects attaches a tag ID to multiple managed objects. +// This operation is idempotent, i.e. if a tag is already attached to the +// object, then the individual operation is a no-op and no error will be thrown. +// +// This operation was added in vSphere API 6.5. 
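+//
+// Illustrative call (not from the upstream govmomi source): assumes m is a
+// *tags.Manager, ctx is a context.Context, vm1 and vm2 are mo.Reference
+// values, and "my-tag" is a placeholder tag name or URN.
+//
+//	err := m.AttachTagToMultipleObjects(ctx, "my-tag", []mo.Reference{vm1, vm2})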
+func (c *Manager) AttachTagToMultipleObjects(ctx context.Context, tagID string, refs []mo.Reference) error { + id, err := c.tagID(ctx, tagID) + if err != nil { + return err + } + + var ids []internal.AssociatedObject + for i := range refs { + ids = append(ids, internal.AssociatedObject(refs[i].Reference())) + } + + spec := struct { + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("attach-tag-to-multiple-objects") + return c.Do(ctx, url.Request(http.MethodPost, spec), nil) +} + +// AttachMultipleTagsToObject attaches multiple tag IDs to a managed object. +// This operation is idempotent. If a tag is already attached to the object, +// then the individual operation is a no-op and no error will be thrown. This +// operation is not atomic. If the underlying call fails with one or more tags +// not successfully attached to the managed object reference it might leave the +// managed object reference in a partially tagged state and needs to be resolved +// by the caller. In this case BatchErrors is returned and can be used to +// analyse failure reasons on each failed tag. +// +// Specified tagIDs must use URN-notation instead of display names or a generic +// error will be returned and no tagging operation will be performed. If the +// managed object reference does not exist a generic 403 Forbidden error will be +// returned. +// +// This operation was added in vSphere API 6.5. +func (c *Manager) AttachMultipleTagsToObject(ctx context.Context, tagIDs []string, ref mo.Reference) error { + for _, id := range tagIDs { + // URN enforced to avoid unnecessary round-trips due to invalid tags or display + // name lookups + if isName(id) { + return fmt.Errorf("specified tag is not a URN: %q", id) + } + } + + obj := internal.AssociatedObject(ref.Reference()) + spec := struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + }{ + ObjectID: obj, + TagIDs: tagIDs, + } + + var res batchResponse + url := c.Resource(internal.AssociationPath).WithAction("attach-multiple-tags-to-object") + err := c.Do(ctx, url.Request(http.MethodPost, spec), &res) + if err != nil { + return err + } + + if !res.Success { + if len(res.Errors) != 0 { + return res.Errors + } + panic("invalid batch error") + } + + return nil +} + +// DetachMultipleTagsFromObject detaches multiple tag IDs from a managed object. +// This operation is idempotent. If a tag is already detached from the object, +// then the individual operation is a no-op and no error will be thrown. This +// operation is not atomic. If the underlying call fails with one or more tags +// not successfully detached from the managed object reference it might leave +// the managed object reference in a partially tagged state and needs to be +// resolved by the caller. In this case BatchErrors is returned and can be used +// to analyse failure reasons on each failed tag. +// +// Specified tagIDs must use URN-notation instead of display names or a generic +// error will be returned and no tagging operation will be performed. If the +// managed object reference does not exist a generic 403 Forbidden error will be +// returned. +// +// This operation was added in vSphere API 6.5. 
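+//
+// Illustrative caller-side sketch (not from the upstream govmomi source)
+// showing how the BatchErrors returned by the batch attach/detach calls can
+// be inspected: assumes m is a *tags.Manager, ctx a context.Context, tagIDs a
+// slice of tag URNs, and vmRef an mo.Reference.
+//
+//	if err := m.DetachMultipleTagsFromObject(ctx, tagIDs, vmRef); err != nil {
+//		if berrs, ok := err.(tags.BatchErrors); ok {
+//			for _, be := range berrs {
+//				fmt.Printf("tag not detached: type=%s message=%s\n", be.Type, be.Message)
+//			}
+//		}
+//	}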
+func (c *Manager) DetachMultipleTagsFromObject(ctx context.Context, tagIDs []string, ref mo.Reference) error { + for _, id := range tagIDs { + // URN enforced to avoid unnecessary round-trips due to invalid tags or display + // name lookups + if isName(id) { + return fmt.Errorf("specified tag is not a URN: %q", id) + } + } + + obj := internal.AssociatedObject(ref.Reference()) + spec := struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + }{ + ObjectID: obj, + TagIDs: tagIDs, + } + + var res batchResponse + url := c.Resource(internal.AssociationPath).WithAction("detach-multiple-tags-from-object") + err := c.Do(ctx, url.Request(http.MethodPost, spec), &res) + if err != nil { + return err + } + + if !res.Success { + if len(res.Errors) != 0 { + return res.Errors + } + panic("invalid batch error") + } + + return nil +} + // ListAttachedTags fetches the array of tag IDs attached to the given object. func (c *Manager) ListAttachedTags(ctx context.Context, ref mo.Reference) ([]string, error) { spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithAction("list-attached-tags") + url := c.Resource(internal.AssociationPath).WithAction("list-attached-tags") var res []string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } @@ -91,7 +224,7 @@ func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.R if err != nil { return nil, err } - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("list-attached-objects") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("list-attached-objects") var res []internal.AssociatedObject if err := c.Do(ctx, url.Request(http.MethodPost, nil), &res); err != nil { return nil, err @@ -103,3 +236,142 @@ func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.R } return refs, nil } + +// AttachedObjects is the response type used by ListAttachedObjectsOnTags. +type AttachedObjects struct { + TagID string `json:"tag_id"` + Tag *Tag `json:"tag,omitempty"` + ObjectIDs []mo.Reference `json:"object_ids"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (t *AttachedObjects) UnmarshalJSON(b []byte) error { + var o struct { + TagID string `json:"tag_id"` + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + } + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + + t.TagID = o.TagID + t.ObjectIDs = make([]mo.Reference, len(o.ObjectIDs)) + for i := range o.ObjectIDs { + t.ObjectIDs[i] = o.ObjectIDs[i] + } + + return nil +} + +// ListAttachedObjectsOnTags fetches the array of attached objects for the given tag IDs. +func (c *Manager) ListAttachedObjectsOnTags(ctx context.Context, tagID []string) ([]AttachedObjects, error) { + var ids []string + for i := range tagID { + id, err := c.tagID(ctx, tagID[i]) + if err != nil { + return nil, err + } + ids = append(ids, id) + } + + spec := struct { + TagIDs []string `json:"tag_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithAction("list-attached-objects-on-tags") + var res []AttachedObjects + return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) +} + +// GetAttachedObjectsOnTags combines ListAttachedObjectsOnTags and populates each Tag field. 
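+//
+// Illustrative call (not from the upstream govmomi source): assumes m is a
+// *tags.Manager, ctx a context.Context, and "my-tag" is a placeholder tag ID.
+//
+//	attached, err := m.GetAttachedObjectsOnTags(ctx, []string{"my-tag"})
+//	if err == nil && len(attached) > 0 && attached[0].Tag != nil {
+//		fmt.Println(attached[0].Tag.Name, len(attached[0].ObjectIDs))
+//	}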
+func (c *Manager) GetAttachedObjectsOnTags(ctx context.Context, tagID []string) ([]AttachedObjects, error) { + objs, err := c.ListAttachedObjectsOnTags(ctx, tagID) + if err != nil { + return nil, fmt.Errorf("list attached objects %s: %s", tagID, err) + } + + tags := make(map[string]*Tag) + + for i := range objs { + var err error + id := objs[i].TagID + tag, ok := tags[id] + if !ok { + tag, err = c.GetTag(ctx, id) + if err != nil { + return nil, fmt.Errorf("get tag %s: %s", id, err) + } + objs[i].Tag = tag + } + } + + return objs, nil +} + +// AttachedTags is the response type used by ListAttachedTagsOnObjects. +type AttachedTags struct { + ObjectID mo.Reference `json:"object_id"` + TagIDs []string `json:"tag_ids"` + Tags []Tag `json:"tags,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (t *AttachedTags) UnmarshalJSON(b []byte) error { + var o struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + } + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + + t.ObjectID = o.ObjectID + t.TagIDs = o.TagIDs + + return nil +} + +// ListAttachedTagsOnObjects fetches the array of attached tag IDs for the given object IDs. +func (c *Manager) ListAttachedTagsOnObjects(ctx context.Context, objectID []mo.Reference) ([]AttachedTags, error) { + var ids []internal.AssociatedObject + for i := range objectID { + ids = append(ids, internal.AssociatedObject(objectID[i].Reference())) + } + + spec := struct { + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithAction("list-attached-tags-on-objects") + var res []AttachedTags + return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) +} + +// GetAttachedTagsOnObjects calls ListAttachedTagsOnObjects and populates each Tags field. +func (c *Manager) GetAttachedTagsOnObjects(ctx context.Context, objectID []mo.Reference) ([]AttachedTags, error) { + objs, err := c.ListAttachedTagsOnObjects(ctx, objectID) + if err != nil { + return nil, fmt.Errorf("list attached tags %s: %s", objectID, err) + } + + tags := make(map[string]*Tag) + + for i := range objs { + for _, id := range objs[i].TagIDs { + var err error + tag, ok := tags[id] + if !ok { + tag, err = c.GetTag(ctx, id) + if err != nil { + return nil, fmt.Errorf("get tag %s: %s", id, err) + } + tags[id] = tag + } + objs[i].Tags = append(objs[i].Tags, *tag) + } + } + + return objs, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tags.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tags.go index 8e3c0a6cac34..b9024f998a2b 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tags.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vapi/tags/tags.go @@ -90,7 +90,7 @@ func (c *Manager) CreateTag(ctx context.Context, tag *Tag) (string, error) { } spec.Tag.CategoryID = cat.ID } - url := internal.URL(c, internal.TagPath) + url := c.Resource(internal.TagPath) var res string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } @@ -105,13 +105,13 @@ func (c *Manager) UpdateTag(ctx context.Context, tag *Tag) error { Description: tag.Description, }, } - url := internal.URL(c, internal.TagPath).WithID(tag.ID) + url := c.Resource(internal.TagPath).WithID(tag.ID) return c.Do(ctx, url.Request(http.MethodPatch, spec), nil) } // DeleteTag deletes an existing tag. 
func (c *Manager) DeleteTag(ctx context.Context, tag *Tag) error { - url := internal.URL(c, internal.TagPath).WithID(tag.ID) + url := c.Resource(internal.TagPath).WithID(tag.ID) return c.Do(ctx, url.Request(http.MethodDelete), nil) } @@ -131,7 +131,7 @@ func (c *Manager) GetTag(ctx context.Context, id string) (*Tag, error) { } } - url := internal.URL(c, internal.TagPath).WithID(id) + url := c.Resource(internal.TagPath).WithID(id) var res Tag return &res, c.Do(ctx, url.Request(http.MethodGet), &res) @@ -148,10 +148,10 @@ func (c *Manager) GetTagForCategory(ctx context.Context, id, category string) (* return nil, err } - for _, id := range ids { - tag, err := c.GetTag(ctx, id) + for _, tagid := range ids { + tag, err := c.GetTag(ctx, tagid) if err != nil { - return nil, fmt.Errorf("get tag for category %s %s: %s", category, id, err) + return nil, fmt.Errorf("get tag for category %s %s: %s", category, tagid, err) } if tag.ID == id || tag.Name == id { return tag, nil @@ -163,7 +163,7 @@ func (c *Manager) GetTagForCategory(ctx context.Context, id, category string) (* // ListTags returns all tag IDs in the system. func (c *Manager) ListTags(ctx context.Context) ([]string, error) { - url := internal.URL(c, internal.TagPath) + url := c.Resource(internal.TagPath) var res []string return res, c.Do(ctx, url.Request(http.MethodGet), &res) } @@ -201,7 +201,7 @@ func (c *Manager) ListTagsForCategory(ctx context.Context, id string) ([]string, body := struct { ID string `json:"category_id"` }{id} - url := internal.URL(c, internal.TagPath).WithID(id).WithAction("list-tags-for-category") + url := c.Resource(internal.TagPath).WithID(id).WithAction("list-tags-for-category") var res []string return res, c.Do(ctx, url.Request(http.MethodPost, body), &res) } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/container_view.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/container_view.go new file mode 100644 index 000000000000..39041c41f973 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/container_view.go @@ -0,0 +1,145 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type ContainerView struct { + ManagedObjectView +} + +func NewContainerView(c *vim25.Client, ref types.ManagedObjectReference) *ContainerView { + return &ContainerView{ + ManagedObjectView: *NewManagedObjectView(c, ref), + } +} + +// Retrieve populates dst as property.Collector.Retrieve does, for all entities in the view of types specified by kind. 
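+//
+// Illustrative usage sketch (not from the upstream govmomi source): assumes c
+// is a connected *vim25.Client and ctx a context.Context; the property paths
+// are examples only.
+//
+//	m := view.NewManager(c)
+//	v, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
+//	if err == nil {
+//		defer v.Destroy(ctx)
+//		var vms []mo.VirtualMachine
+//		err = v.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.powerState"}, &vms)
+//	}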
+func (v ContainerView) Retrieve(ctx context.Context, kind []string, ps []string, dst interface{}, pspec ...types.PropertySpec) error { + pc := property.DefaultCollector(v.Client()) + + ospec := types.ObjectSpec{ + Obj: v.Reference(), + Skip: types.NewBool(true), + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + Type: v.Reference().Type, + Path: "view", + }, + }, + } + + if len(kind) == 0 { + kind = []string{"ManagedEntity"} + } + + for _, t := range kind { + spec := types.PropertySpec{ + Type: t, + } + + if len(ps) == 0 { + spec.All = types.NewBool(true) + } else { + spec.PathSet = ps + } + + pspec = append(pspec, spec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspec, + }, + }, + } + + res, err := pc.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + if d, ok := dst.(*[]types.ObjectContent); ok { + *d = res.Returnval + return nil + } + + return mo.LoadObjectContent(res.Returnval, dst) +} + +// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter. +func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps []string, dst interface{}, filter property.Filter) error { + if len(filter) == 0 { + return v.Retrieve(ctx, kind, ps, dst) + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return err + } + + objs := filter.MatchObjectContent(content) + + pc := property.DefaultCollector(v.Client()) + + return pc.Retrieve(ctx, objs, ps, dst) +} + +// Find returns object references for entities of type kind, matching the given filter. +func (v ContainerView) Find(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) { + if len(filter) == 0 { + // Ensure we have at least 1 filter to avoid retrieving all properties. + filter = property.Filter{"name": "*"} + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return nil, err + } + + return filter.MatchObjectContent(content), nil +} + +// FindAny returns object references for entities of type kind, matching any property the given filter. +func (v ContainerView) FindAny(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) { + if len(filter) == 0 { + // Ensure we have at least 1 filter to avoid retrieving all properties. + filter = property.Filter{"name": "*"} + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return nil, err + } + + return filter.MatchAnyObjectContent(content), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/list_view.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/list_view.go new file mode 100644 index 000000000000..e8cfb504341e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/list_view.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type ListView struct { + ManagedObjectView +} + +func NewListView(c *vim25.Client, ref types.ManagedObjectReference) *ListView { + return &ListView{ + ManagedObjectView: *NewManagedObjectView(c, ref), + } +} + +func (v ListView) Add(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ModifyListView{ + This: v.Reference(), + Add: refs, + } + _, err := methods.ModifyListView(ctx, v.Client(), &req) + return err +} + +func (v ListView) Remove(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ModifyListView{ + This: v.Reference(), + Remove: refs, + } + _, err := methods.ModifyListView(ctx, v.Client(), &req) + return err +} + +func (v ListView) Reset(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ResetListView{ + This: v.Reference(), + Obj: refs, + } + _, err := methods.ResetListView(ctx, v.Client(), &req) + return err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/managed_object_view.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/managed_object_view.go new file mode 100644 index 000000000000..805c8643107c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/managed_object_view.go @@ -0,0 +1,52 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type ManagedObjectView struct { + object.Common +} + +func NewManagedObjectView(c *vim25.Client, ref types.ManagedObjectReference) *ManagedObjectView { + return &ManagedObjectView{ + Common: object.NewCommon(c, ref), + } +} + +func (v *ManagedObjectView) TraversalSpec() *types.TraversalSpec { + return &types.TraversalSpec{ + Path: "view", + Type: v.Reference().Type, + } +} + +func (v *ManagedObjectView) Destroy(ctx context.Context) error { + req := types.DestroyView{ + This: v.Reference(), + } + + _, err := methods.DestroyView(ctx, v.Client(), &req) + return err +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/manager.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/manager.go new file mode 100644 index 000000000000..d44def0cd997 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/manager.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Manager struct { + object.Common +} + +func NewManager(c *vim25.Client) *Manager { + m := Manager{ + object.NewCommon(c, *c.ServiceContent.ViewManager), + } + + return &m +} + +func (m Manager) CreateListView(ctx context.Context, objects []types.ManagedObjectReference) (*ListView, error) { + req := types.CreateListView{ + This: m.Common.Reference(), + Obj: objects, + } + + res, err := methods.CreateListView(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return NewListView(m.Client(), res.Returnval), nil +} + +func (m Manager) CreateContainerView(ctx context.Context, container types.ManagedObjectReference, managedObjectTypes []string, recursive bool) (*ContainerView, error) { + + req := types.CreateContainerView{ + This: m.Common.Reference(), + Container: container, + Recursive: recursive, + Type: managedObjectTypes, + } + + res, err := methods.CreateContainerView(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return NewContainerView(m.Client(), res.Returnval), nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/task_view.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/task_view.go new file mode 100644 index 000000000000..68f62f8d107f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/view/task_view.go @@ -0,0 +1,125 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/types" +) + +// TaskView extends ListView such that it can follow a ManagedEntity's recentTask updates. +type TaskView struct { + *ListView + + Follow bool + + Watch *types.ManagedObjectReference +} + +// CreateTaskView creates a new ListView that optionally watches for a ManagedEntity's recentTask updates. +func (m Manager) CreateTaskView(ctx context.Context, watch *types.ManagedObjectReference) (*TaskView, error) { + l, err := m.CreateListView(ctx, nil) + if err != nil { + return nil, err + } + + tv := &TaskView{ + ListView: l, + Watch: watch, + } + + return tv, nil +} + +// Collect calls function f for each Task update. +func (v TaskView) Collect(ctx context.Context, f func([]types.TaskInfo)) error { + // Using TaskHistoryCollector would be less clunky, but it isn't supported on ESX at all. + ref := v.Reference() + filter := new(property.WaitFilter).Add(ref, "Task", []string{"info"}, v.TraversalSpec()) + + if v.Watch != nil { + filter.Add(*v.Watch, v.Watch.Type, []string{"recentTask"}) + } + + pc := property.DefaultCollector(v.Client()) + + completed := make(map[string]bool) + + return property.WaitForUpdates(ctx, pc, filter, func(updates []types.ObjectUpdate) bool { + var infos []types.TaskInfo + var prune []types.ManagedObjectReference + var tasks []types.ManagedObjectReference + var reset func() + + for _, update := range updates { + for _, change := range update.ChangeSet { + if change.Name == "recentTask" { + tasks = change.Val.(types.ArrayOfManagedObjectReference).ManagedObjectReference + if len(tasks) != 0 { + reset = func() { + _ = v.Reset(ctx, tasks) + + // Remember any tasks we've reported as complete already, + // to avoid reporting multiple times when Reset is triggered. 
+ rtasks := make(map[string]bool) + for i := range tasks { + if _, ok := completed[tasks[i].Value]; ok { + rtasks[tasks[i].Value] = true + } + } + completed = rtasks + } + } + + continue + } + + info, ok := change.Val.(types.TaskInfo) + if !ok { + continue + } + + if !completed[info.Task.Value] { + infos = append(infos, info) + } + + if v.Follow && info.CompleteTime != nil { + prune = append(prune, info.Task) + completed[info.Task.Value] = true + } + } + } + + if len(infos) != 0 { + f(infos) + } + + if reset != nil { + reset() + } else if len(prune) != 0 { + _ = v.Remove(ctx, prune) + } + + if len(tasks) != 0 && len(infos) == 0 { + return false + } + + return !v.Follow + }) +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/client.go index 1d26fb8b6a7f..b14cea852021 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/client.go @@ -19,16 +19,20 @@ package vim25 import ( "context" "encoding/json" + "fmt" + "net/http" + "path" "strings" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" ) const ( Namespace = "vim25" - Version = "6.7" + Version = "7.0" Path = "/sdk" ) @@ -54,7 +58,7 @@ type Client struct { RoundTripper soap.RoundTripper } -// NewClient creates and returns a new client wirh the ServiceContent field +// NewClient creates and returns a new client with the ServiceContent field // filled in. func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { c := Client{ @@ -67,7 +71,7 @@ func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { if c.Namespace == "" { c.Namespace = "urn:" + Namespace - } else if strings.Index(c.Namespace, ":") < 0 { + } else if !strings.Contains(c.Namespace, ":") { c.Namespace = "urn:" + c.Namespace // ensure valid URI format } if c.Version == "" { @@ -84,6 +88,42 @@ func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { return &c, nil } +// UseServiceVersion sets soap.Client.Version to the current version of the service endpoint via /sdk/vimServiceVersions.xml +func (c *Client) UseServiceVersion(kind ...string) error { + ns := "vim" + if len(kind) != 0 { + ns = kind[0] + } + + u := c.URL() + u.Path = path.Join(Path, ns+"ServiceVersions.xml") + + res, err := c.Get(u.String()) + if err != nil { + return err + } + + if res.StatusCode != http.StatusOK { + return fmt.Errorf("http.Get(%s): %s", u.Path, err) + } + + v := struct { + Namespace *string `xml:"namespace>name"` + Version *string `xml:"namespace>version"` + }{ + &c.Namespace, + &c.Version, + } + + err = xml.NewDecoder(res.Body).Decode(&v) + _ = res.Body.Close() + if err != nil { + return fmt.Errorf("xml.Decode(%s): %s", u.Path, err) + } + + return nil +} + // RoundTrip dispatches to the RoundTripper field. 
func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { return c.RoundTripper.RoundTrip(ctx, req, res) @@ -140,6 +180,11 @@ func (c *Client) Valid() bool { return true } +// Path returns vim25.Path (see cache.Client) +func (c *Client) Path() string { + return Path +} + // IsVC returns true if we are connected to a vCenter func (c *Client) IsVC() bool { return c.ServiceContent.About.ApiType == "VirtualCenter" diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/debug.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/debug.go index 22d547178466..048062825d55 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/debug.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/debug.go @@ -18,8 +18,7 @@ package debug import ( "io" - "os" - "path" + "regexp" ) // Provider specified the interface types must implement to be used as a @@ -30,7 +29,22 @@ type Provider interface { Flush() } +// ReadCloser is a struct that satisfies the io.ReadCloser interface +type ReadCloser struct { + io.Reader + io.Closer +} + +// NewTeeReader wraps io.TeeReader and patches through the Close() function. +func NewTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return ReadCloser{ + Reader: io.TeeReader(rc, w), + Closer: rc, + } +} + var currentProvider Provider = nil +var scrubPassword = regexp.MustCompile(`(.*)`) func SetProvider(p Provider) { if currentProvider != nil { @@ -54,28 +68,6 @@ func Flush() { currentProvider.Flush() } -// FileProvider implements a debugging provider that creates a real file for -// every call to NewFile. It maintains a list of all files that it creates, -// such that it can close them when its Flush function is called. -type FileProvider struct { - Path string - - files []*os.File -} - -func (fp *FileProvider) NewFile(p string) io.WriteCloser { - f, err := os.Create(path.Join(fp.Path, p)) - if err != nil { - panic(err) - } - - fp.files = append(fp.files, f) - - return f -} - -func (fp *FileProvider) Flush() { - for _, f := range fp.files { - f.Close() - } +func Scrub(in []byte) []byte { + return scrubPassword.ReplaceAll(in, []byte(`********`)) } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/file.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/file.go new file mode 100644 index 000000000000..4290a29167ff --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/file.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "io" + "os" + "path" + "sync" +) + +// FileProvider implements a debugging provider that creates a real file for +// every call to NewFile. It maintains a list of all files that it creates, +// such that it can close them when its Flush function is called. 
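+//
+// Illustrative way to enable this provider (not from the upstream govmomi
+// source); "/tmp/govmomi-debug" is a made-up directory that must already
+// exist.
+//
+//	debug.SetProvider(&debug.FileProvider{Path: "/tmp/govmomi-debug"})
+//	defer debug.Flush()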
+type FileProvider struct { + Path string + + mu sync.Mutex + files []*os.File +} + +func (fp *FileProvider) NewFile(p string) io.WriteCloser { + f, err := os.Create(path.Join(fp.Path, p)) + if err != nil { + panic(err) + } + + fp.mu.Lock() + defer fp.mu.Unlock() + fp.files = append(fp.files, f) + + return NewFileWriterCloser(f, p) +} + +func (fp *FileProvider) Flush() { + fp.mu.Lock() + defer fp.mu.Unlock() + for _, f := range fp.files { + f.Close() + } +} + +type FileWriterCloser struct { + f *os.File + p string +} + +func NewFileWriterCloser(f *os.File, p string) *FileWriterCloser { + return &FileWriterCloser{ + f, + p, + } +} + +func (fwc *FileWriterCloser) Write(p []byte) (n int, err error) { + return fwc.f.Write(Scrub(p)) +} + +func (fwc *FileWriterCloser) Close() error { + return fwc.f.Close() +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/log.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/log.go new file mode 100644 index 000000000000..183c606a3cb6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/debug/log.go @@ -0,0 +1,49 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "fmt" + "io" + "os" +) + +type LogWriterCloser struct { +} + +func NewLogWriterCloser() *LogWriterCloser { + return &LogWriterCloser{} +} + +func (lwc *LogWriterCloser) Write(p []byte) (n int, err error) { + fmt.Fprint(os.Stderr, string(Scrub(p))) + return len(p), nil +} + +func (lwc *LogWriterCloser) Close() error { + return nil +} + +type LogProvider struct { +} + +func (s *LogProvider) NewFile(p string) io.WriteCloser { + return NewLogWriterCloser() +} + +func (s *LogProvider) Flush() { +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/methods.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/methods.go index c3ad23eefb46..4f1cf8ac0b52 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/methods.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,9 +23,29 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +type AbandonHciWorkflowBody struct { + Req *types.AbandonHciWorkflow `xml:"urn:vim25 AbandonHciWorkflow,omitempty"` + Res *types.AbandonHciWorkflowResponse `xml:"AbandonHciWorkflowResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbandonHciWorkflowBody) Fault() *soap.Fault { return b.Fault_ } + +func AbandonHciWorkflow(ctx context.Context, r soap.RoundTripper, req *types.AbandonHciWorkflow) (*types.AbandonHciWorkflowResponse, error) { + var reqBody, resBody AbandonHciWorkflowBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type AbdicateDomOwnershipBody struct { Req *types.AbdicateDomOwnership `xml:"urn:vim25 AbdicateDomOwnership,omitempty"` - Res *types.AbdicateDomOwnershipResponse `xml:"urn:vim25 AbdicateDomOwnershipResponse,omitempty"` + Res *types.AbdicateDomOwnershipResponse `xml:"AbdicateDomOwnershipResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -43,9 +63,29 @@ func AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.A return resBody.Res, nil } +type AbortCustomization_TaskBody struct { + Req *types.AbortCustomization_Task `xml:"urn:vim25 AbortCustomization_Task,omitempty"` + Res *types.AbortCustomization_TaskResponse `xml:"AbortCustomization_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbortCustomization_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AbortCustomization_Task(ctx context.Context, r soap.RoundTripper, req *types.AbortCustomization_Task) (*types.AbortCustomization_TaskResponse, error) { + var reqBody, resBody AbortCustomization_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type AcknowledgeAlarmBody struct { Req *types.AcknowledgeAlarm `xml:"urn:vim25 AcknowledgeAlarm,omitempty"` - Res *types.AcknowledgeAlarmResponse `xml:"urn:vim25 AcknowledgeAlarmResponse,omitempty"` + Res *types.AcknowledgeAlarmResponse `xml:"AcknowledgeAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -65,7 +105,7 @@ func AcknowledgeAlarm(ctx context.Context, r soap.RoundTripper, req *types.Ackno type AcquireCimServicesTicketBody struct { Req *types.AcquireCimServicesTicket `xml:"urn:vim25 AcquireCimServicesTicket,omitempty"` - Res *types.AcquireCimServicesTicketResponse `xml:"urn:vim25 AcquireCimServicesTicketResponse,omitempty"` + Res *types.AcquireCimServicesTicketResponse `xml:"AcquireCimServicesTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -85,7 +125,7 @@ func AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *typ type AcquireCloneTicketBody struct { Req *types.AcquireCloneTicket `xml:"urn:vim25 AcquireCloneTicket,omitempty"` - Res *types.AcquireCloneTicketResponse `xml:"urn:vim25 AcquireCloneTicketResponse,omitempty"` + Res *types.AcquireCloneTicketResponse `xml:"AcquireCloneTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -105,7 +145,7 @@ func AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.Acq type 
AcquireCredentialsInGuestBody struct { Req *types.AcquireCredentialsInGuest `xml:"urn:vim25 AcquireCredentialsInGuest,omitempty"` - Res *types.AcquireCredentialsInGuestResponse `xml:"urn:vim25 AcquireCredentialsInGuestResponse,omitempty"` + Res *types.AcquireCredentialsInGuestResponse `xml:"AcquireCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -125,7 +165,7 @@ func AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *ty type AcquireGenericServiceTicketBody struct { Req *types.AcquireGenericServiceTicket `xml:"urn:vim25 AcquireGenericServiceTicket,omitempty"` - Res *types.AcquireGenericServiceTicketResponse `xml:"urn:vim25 AcquireGenericServiceTicketResponse,omitempty"` + Res *types.AcquireGenericServiceTicketResponse `xml:"AcquireGenericServiceTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -145,7 +185,7 @@ func AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req * type AcquireLocalTicketBody struct { Req *types.AcquireLocalTicket `xml:"urn:vim25 AcquireLocalTicket,omitempty"` - Res *types.AcquireLocalTicketResponse `xml:"urn:vim25 AcquireLocalTicketResponse,omitempty"` + Res *types.AcquireLocalTicketResponse `xml:"AcquireLocalTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -165,7 +205,7 @@ func AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.Acq type AcquireMksTicketBody struct { Req *types.AcquireMksTicket `xml:"urn:vim25 AcquireMksTicket,omitempty"` - Res *types.AcquireMksTicketResponse `xml:"urn:vim25 AcquireMksTicketResponse,omitempty"` + Res *types.AcquireMksTicketResponse `xml:"AcquireMksTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -185,7 +225,7 @@ func AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.Acqui type AcquireTicketBody struct { Req *types.AcquireTicket `xml:"urn:vim25 AcquireTicket,omitempty"` - Res *types.AcquireTicketResponse `xml:"urn:vim25 AcquireTicketResponse,omitempty"` + Res *types.AcquireTicketResponse `xml:"AcquireTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -205,7 +245,7 @@ func AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireT type AddAuthorizationRoleBody struct { Req *types.AddAuthorizationRole `xml:"urn:vim25 AddAuthorizationRole,omitempty"` - Res *types.AddAuthorizationRoleResponse `xml:"urn:vim25 AddAuthorizationRoleResponse,omitempty"` + Res *types.AddAuthorizationRoleResponse `xml:"AddAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -225,7 +265,7 @@ func AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.A type AddCustomFieldDefBody struct { Req *types.AddCustomFieldDef `xml:"urn:vim25 AddCustomFieldDef,omitempty"` - Res *types.AddCustomFieldDefResponse `xml:"urn:vim25 AddCustomFieldDefResponse,omitempty"` + Res *types.AddCustomFieldDefResponse `xml:"AddCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -245,7 +285,7 @@ func AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddC type AddDVPortgroup_TaskBody struct { Req *types.AddDVPortgroup_Task `xml:"urn:vim25 
AddDVPortgroup_Task,omitempty"` - Res *types.AddDVPortgroup_TaskResponse `xml:"urn:vim25 AddDVPortgroup_TaskResponse,omitempty"` + Res *types.AddDVPortgroup_TaskResponse `xml:"AddDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -265,7 +305,7 @@ func AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.Ad type AddDisks_TaskBody struct { Req *types.AddDisks_Task `xml:"urn:vim25 AddDisks_Task,omitempty"` - Res *types.AddDisks_TaskResponse `xml:"urn:vim25 AddDisks_TaskResponse,omitempty"` + Res *types.AddDisks_TaskResponse `xml:"AddDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -285,7 +325,7 @@ func AddDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDisks type AddFilterBody struct { Req *types.AddFilter `xml:"urn:vim25 AddFilter,omitempty"` - Res *types.AddFilterResponse `xml:"urn:vim25 AddFilterResponse,omitempty"` + Res *types.AddFilterResponse `xml:"AddFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -305,7 +345,7 @@ func AddFilter(ctx context.Context, r soap.RoundTripper, req *types.AddFilter) ( type AddFilterEntitiesBody struct { Req *types.AddFilterEntities `xml:"urn:vim25 AddFilterEntities,omitempty"` - Res *types.AddFilterEntitiesResponse `xml:"urn:vim25 AddFilterEntitiesResponse,omitempty"` + Res *types.AddFilterEntitiesResponse `xml:"AddFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -325,7 +365,7 @@ func AddFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.AddF type AddGuestAliasBody struct { Req *types.AddGuestAlias `xml:"urn:vim25 AddGuestAlias,omitempty"` - Res *types.AddGuestAliasResponse `xml:"urn:vim25 AddGuestAliasResponse,omitempty"` + Res *types.AddGuestAliasResponse `xml:"AddGuestAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -345,7 +385,7 @@ func AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuest type AddHost_TaskBody struct { Req *types.AddHost_Task `xml:"urn:vim25 AddHost_Task,omitempty"` - Res *types.AddHost_TaskResponse `xml:"urn:vim25 AddHost_TaskResponse,omitempty"` + Res *types.AddHost_TaskResponse `xml:"AddHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -365,7 +405,7 @@ func AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_T type AddInternetScsiSendTargetsBody struct { Req *types.AddInternetScsiSendTargets `xml:"urn:vim25 AddInternetScsiSendTargets,omitempty"` - Res *types.AddInternetScsiSendTargetsResponse `xml:"urn:vim25 AddInternetScsiSendTargetsResponse,omitempty"` + Res *types.AddInternetScsiSendTargetsResponse `xml:"AddInternetScsiSendTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -385,7 +425,7 @@ func AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *t type AddInternetScsiStaticTargetsBody struct { Req *types.AddInternetScsiStaticTargets `xml:"urn:vim25 AddInternetScsiStaticTargets,omitempty"` - Res *types.AddInternetScsiStaticTargetsResponse `xml:"urn:vim25 AddInternetScsiStaticTargetsResponse,omitempty"` + Res *types.AddInternetScsiStaticTargetsResponse `xml:"AddInternetScsiStaticTargetsResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -405,7 +445,7 @@ func AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req type AddKeyBody struct { Req *types.AddKey `xml:"urn:vim25 AddKey,omitempty"` - Res *types.AddKeyResponse `xml:"urn:vim25 AddKeyResponse,omitempty"` + Res *types.AddKeyResponse `xml:"AddKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -425,7 +465,7 @@ func AddKey(ctx context.Context, r soap.RoundTripper, req *types.AddKey) (*types type AddKeysBody struct { Req *types.AddKeys `xml:"urn:vim25 AddKeys,omitempty"` - Res *types.AddKeysResponse `xml:"urn:vim25 AddKeysResponse,omitempty"` + Res *types.AddKeysResponse `xml:"AddKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -445,7 +485,7 @@ func AddKeys(ctx context.Context, r soap.RoundTripper, req *types.AddKeys) (*typ type AddLicenseBody struct { Req *types.AddLicense `xml:"urn:vim25 AddLicense,omitempty"` - Res *types.AddLicenseResponse `xml:"urn:vim25 AddLicenseResponse,omitempty"` + Res *types.AddLicenseResponse `xml:"AddLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -465,7 +505,7 @@ func AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) type AddMonitoredEntitiesBody struct { Req *types.AddMonitoredEntities `xml:"urn:vim25 AddMonitoredEntities,omitempty"` - Res *types.AddMonitoredEntitiesResponse `xml:"urn:vim25 AddMonitoredEntitiesResponse,omitempty"` + Res *types.AddMonitoredEntitiesResponse `xml:"AddMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -485,7 +525,7 @@ func AddMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.A type AddNetworkResourcePoolBody struct { Req *types.AddNetworkResourcePool `xml:"urn:vim25 AddNetworkResourcePool,omitempty"` - Res *types.AddNetworkResourcePoolResponse `xml:"urn:vim25 AddNetworkResourcePoolResponse,omitempty"` + Res *types.AddNetworkResourcePoolResponse `xml:"AddNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -505,7 +545,7 @@ func AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types type AddPortGroupBody struct { Req *types.AddPortGroup `xml:"urn:vim25 AddPortGroup,omitempty"` - Res *types.AddPortGroupResponse `xml:"urn:vim25 AddPortGroupResponse,omitempty"` + Res *types.AddPortGroupResponse `xml:"AddPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -525,7 +565,7 @@ func AddPortGroup(ctx context.Context, r soap.RoundTripper, req *types.AddPortGr type AddServiceConsoleVirtualNicBody struct { Req *types.AddServiceConsoleVirtualNic `xml:"urn:vim25 AddServiceConsoleVirtualNic,omitempty"` - Res *types.AddServiceConsoleVirtualNicResponse `xml:"urn:vim25 AddServiceConsoleVirtualNicResponse,omitempty"` + Res *types.AddServiceConsoleVirtualNicResponse `xml:"AddServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -545,7 +585,7 @@ func AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req * type AddStandaloneHost_TaskBody struct { Req *types.AddStandaloneHost_Task `xml:"urn:vim25 AddStandaloneHost_Task,omitempty"` - Res 
*types.AddStandaloneHost_TaskResponse `xml:"urn:vim25 AddStandaloneHost_TaskResponse,omitempty"` + Res *types.AddStandaloneHost_TaskResponse `xml:"AddStandaloneHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -565,7 +605,7 @@ func AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types type AddVirtualNicBody struct { Req *types.AddVirtualNic `xml:"urn:vim25 AddVirtualNic,omitempty"` - Res *types.AddVirtualNicResponse `xml:"urn:vim25 AddVirtualNicResponse,omitempty"` + Res *types.AddVirtualNicResponse `xml:"AddVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -585,7 +625,7 @@ func AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtu type AddVirtualSwitchBody struct { Req *types.AddVirtualSwitch `xml:"urn:vim25 AddVirtualSwitch,omitempty"` - Res *types.AddVirtualSwitchResponse `xml:"urn:vim25 AddVirtualSwitchResponse,omitempty"` + Res *types.AddVirtualSwitchResponse `xml:"AddVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -605,7 +645,7 @@ func AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVi type AllocateIpv4AddressBody struct { Req *types.AllocateIpv4Address `xml:"urn:vim25 AllocateIpv4Address,omitempty"` - Res *types.AllocateIpv4AddressResponse `xml:"urn:vim25 AllocateIpv4AddressResponse,omitempty"` + Res *types.AllocateIpv4AddressResponse `xml:"AllocateIpv4AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -625,7 +665,7 @@ func AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.Al type AllocateIpv6AddressBody struct { Req *types.AllocateIpv6Address `xml:"urn:vim25 AllocateIpv6Address,omitempty"` - Res *types.AllocateIpv6AddressResponse `xml:"urn:vim25 AllocateIpv6AddressResponse,omitempty"` + Res *types.AllocateIpv6AddressResponse `xml:"AllocateIpv6AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -645,7 +685,7 @@ func AllocateIpv6Address(ctx context.Context, r soap.RoundTripper, req *types.Al type AnswerVMBody struct { Req *types.AnswerVM `xml:"urn:vim25 AnswerVM,omitempty"` - Res *types.AnswerVMResponse `xml:"urn:vim25 AnswerVMResponse,omitempty"` + Res *types.AnswerVMResponse `xml:"AnswerVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -665,7 +705,7 @@ func AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*t type ApplyEntitiesConfig_TaskBody struct { Req *types.ApplyEntitiesConfig_Task `xml:"urn:vim25 ApplyEntitiesConfig_Task,omitempty"` - Res *types.ApplyEntitiesConfig_TaskResponse `xml:"urn:vim25 ApplyEntitiesConfig_TaskResponse,omitempty"` + Res *types.ApplyEntitiesConfig_TaskResponse `xml:"ApplyEntitiesConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -685,7 +725,7 @@ func ApplyEntitiesConfig_Task(ctx context.Context, r soap.RoundTripper, req *typ type ApplyEvcModeVM_TaskBody struct { Req *types.ApplyEvcModeVM_Task `xml:"urn:vim25 ApplyEvcModeVM_Task,omitempty"` - Res *types.ApplyEvcModeVM_TaskResponse `xml:"urn:vim25 ApplyEvcModeVM_TaskResponse,omitempty"` + Res *types.ApplyEvcModeVM_TaskResponse `xml:"ApplyEvcModeVM_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -705,7 +745,7 @@ func ApplyEvcModeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Ap type ApplyHostConfig_TaskBody struct { Req *types.ApplyHostConfig_Task `xml:"urn:vim25 ApplyHostConfig_Task,omitempty"` - Res *types.ApplyHostConfig_TaskResponse `xml:"urn:vim25 ApplyHostConfig_TaskResponse,omitempty"` + Res *types.ApplyHostConfig_TaskResponse `xml:"ApplyHostConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -725,7 +765,7 @@ func ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.A type ApplyRecommendationBody struct { Req *types.ApplyRecommendation `xml:"urn:vim25 ApplyRecommendation,omitempty"` - Res *types.ApplyRecommendationResponse `xml:"urn:vim25 ApplyRecommendationResponse,omitempty"` + Res *types.ApplyRecommendationResponse `xml:"ApplyRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -745,7 +785,7 @@ func ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.Ap type ApplyStorageDrsRecommendationToPod_TaskBody struct { Req *types.ApplyStorageDrsRecommendationToPod_Task `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty"` - Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` + Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -765,7 +805,7 @@ func ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTr type ApplyStorageDrsRecommendation_TaskBody struct { Req *types.ApplyStorageDrsRecommendation_Task `xml:"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty"` - Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendation_TaskResponse,omitempty"` + Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"ApplyStorageDrsRecommendation_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -785,7 +825,7 @@ func ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper type AreAlarmActionsEnabledBody struct { Req *types.AreAlarmActionsEnabled `xml:"urn:vim25 AreAlarmActionsEnabled,omitempty"` - Res *types.AreAlarmActionsEnabledResponse `xml:"urn:vim25 AreAlarmActionsEnabledResponse,omitempty"` + Res *types.AreAlarmActionsEnabledResponse `xml:"AreAlarmActionsEnabledResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -805,7 +845,7 @@ func AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types type AssignUserToGroupBody struct { Req *types.AssignUserToGroup `xml:"urn:vim25 AssignUserToGroup,omitempty"` - Res *types.AssignUserToGroupResponse `xml:"urn:vim25 AssignUserToGroupResponse,omitempty"` + Res *types.AssignUserToGroupResponse `xml:"AssignUserToGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -825,7 +865,7 @@ func AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.Assi type AssociateProfileBody struct { Req *types.AssociateProfile `xml:"urn:vim25 AssociateProfile,omitempty"` - Res *types.AssociateProfileResponse `xml:"urn:vim25 
AssociateProfileResponse,omitempty"` + Res *types.AssociateProfileResponse `xml:"AssociateProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -845,7 +885,7 @@ func AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.Assoc type AttachDisk_TaskBody struct { Req *types.AttachDisk_Task `xml:"urn:vim25 AttachDisk_Task,omitempty"` - Res *types.AttachDisk_TaskResponse `xml:"urn:vim25 AttachDisk_TaskResponse,omitempty"` + Res *types.AttachDisk_TaskResponse `xml:"AttachDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -865,7 +905,7 @@ func AttachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Attach type AttachScsiLunBody struct { Req *types.AttachScsiLun `xml:"urn:vim25 AttachScsiLun,omitempty"` - Res *types.AttachScsiLunResponse `xml:"urn:vim25 AttachScsiLunResponse,omitempty"` + Res *types.AttachScsiLunResponse `xml:"AttachScsiLunResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -885,7 +925,7 @@ func AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachSc type AttachScsiLunEx_TaskBody struct { Req *types.AttachScsiLunEx_Task `xml:"urn:vim25 AttachScsiLunEx_Task,omitempty"` - Res *types.AttachScsiLunEx_TaskResponse `xml:"urn:vim25 AttachScsiLunEx_TaskResponse,omitempty"` + Res *types.AttachScsiLunEx_TaskResponse `xml:"AttachScsiLunEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -905,7 +945,7 @@ func AttachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.A type AttachTagToVStorageObjectBody struct { Req *types.AttachTagToVStorageObject `xml:"urn:vim25 AttachTagToVStorageObject,omitempty"` - Res *types.AttachTagToVStorageObjectResponse `xml:"urn:vim25 AttachTagToVStorageObjectResponse,omitempty"` + Res *types.AttachTagToVStorageObjectResponse `xml:"AttachTagToVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -925,7 +965,7 @@ func AttachTagToVStorageObject(ctx context.Context, r soap.RoundTripper, req *ty type AttachVmfsExtentBody struct { Req *types.AttachVmfsExtent `xml:"urn:vim25 AttachVmfsExtent,omitempty"` - Res *types.AttachVmfsExtentResponse `xml:"urn:vim25 AttachVmfsExtentResponse,omitempty"` + Res *types.AttachVmfsExtentResponse `xml:"AttachVmfsExtentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -945,7 +985,7 @@ func AttachVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.Attac type AutoStartPowerOffBody struct { Req *types.AutoStartPowerOff `xml:"urn:vim25 AutoStartPowerOff,omitempty"` - Res *types.AutoStartPowerOffResponse `xml:"urn:vim25 AutoStartPowerOffResponse,omitempty"` + Res *types.AutoStartPowerOffResponse `xml:"AutoStartPowerOffResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -965,7 +1005,7 @@ func AutoStartPowerOff(ctx context.Context, r soap.RoundTripper, req *types.Auto type AutoStartPowerOnBody struct { Req *types.AutoStartPowerOn `xml:"urn:vim25 AutoStartPowerOn,omitempty"` - Res *types.AutoStartPowerOnResponse `xml:"urn:vim25 AutoStartPowerOnResponse,omitempty"` + Res *types.AutoStartPowerOnResponse `xml:"AutoStartPowerOnResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ 
-985,7 +1025,7 @@ func AutoStartPowerOn(ctx context.Context, r soap.RoundTripper, req *types.AutoS type BackupFirmwareConfigurationBody struct { Req *types.BackupFirmwareConfiguration `xml:"urn:vim25 BackupFirmwareConfiguration,omitempty"` - Res *types.BackupFirmwareConfigurationResponse `xml:"urn:vim25 BackupFirmwareConfigurationResponse,omitempty"` + Res *types.BackupFirmwareConfigurationResponse `xml:"BackupFirmwareConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1003,9 +1043,69 @@ func BackupFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req * return resBody.Res, nil } +type BatchAddHostsToCluster_TaskBody struct { + Req *types.BatchAddHostsToCluster_Task `xml:"urn:vim25 BatchAddHostsToCluster_Task,omitempty"` + Res *types.BatchAddHostsToCluster_TaskResponse `xml:"BatchAddHostsToCluster_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchAddHostsToCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchAddHostsToCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.BatchAddHostsToCluster_Task) (*types.BatchAddHostsToCluster_TaskResponse, error) { + var reqBody, resBody BatchAddHostsToCluster_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BatchAddStandaloneHosts_TaskBody struct { + Req *types.BatchAddStandaloneHosts_Task `xml:"urn:vim25 BatchAddStandaloneHosts_Task,omitempty"` + Res *types.BatchAddStandaloneHosts_TaskResponse `xml:"BatchAddStandaloneHosts_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchAddStandaloneHosts_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchAddStandaloneHosts_Task(ctx context.Context, r soap.RoundTripper, req *types.BatchAddStandaloneHosts_Task) (*types.BatchAddStandaloneHosts_TaskResponse, error) { + var reqBody, resBody BatchAddStandaloneHosts_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BatchQueryConnectInfoBody struct { + Req *types.BatchQueryConnectInfo `xml:"urn:vim25 BatchQueryConnectInfo,omitempty"` + Res *types.BatchQueryConnectInfoResponse `xml:"BatchQueryConnectInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchQueryConnectInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchQueryConnectInfo(ctx context.Context, r soap.RoundTripper, req *types.BatchQueryConnectInfo) (*types.BatchQueryConnectInfoResponse, error) { + var reqBody, resBody BatchQueryConnectInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type BindVnicBody struct { Req *types.BindVnic `xml:"urn:vim25 BindVnic,omitempty"` - Res *types.BindVnicResponse `xml:"urn:vim25 BindVnicResponse,omitempty"` + Res *types.BindVnicResponse `xml:"BindVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1025,7 +1125,7 @@ func BindVnic(ctx context.Context, r soap.RoundTripper, req *types.BindVnic) (*t type BrowseDiagnosticLogBody struct { Req *types.BrowseDiagnosticLog `xml:"urn:vim25 BrowseDiagnosticLog,omitempty"` - Res 
*types.BrowseDiagnosticLogResponse `xml:"urn:vim25 BrowseDiagnosticLogResponse,omitempty"` + Res *types.BrowseDiagnosticLogResponse `xml:"BrowseDiagnosticLogResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1045,7 +1145,7 @@ func BrowseDiagnosticLog(ctx context.Context, r soap.RoundTripper, req *types.Br type CanProvisionObjectsBody struct { Req *types.CanProvisionObjects `xml:"urn:vim25 CanProvisionObjects,omitempty"` - Res *types.CanProvisionObjectsResponse `xml:"urn:vim25 CanProvisionObjectsResponse,omitempty"` + Res *types.CanProvisionObjectsResponse `xml:"CanProvisionObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1065,7 +1165,7 @@ func CanProvisionObjects(ctx context.Context, r soap.RoundTripper, req *types.Ca type CancelRecommendationBody struct { Req *types.CancelRecommendation `xml:"urn:vim25 CancelRecommendation,omitempty"` - Res *types.CancelRecommendationResponse `xml:"urn:vim25 CancelRecommendationResponse,omitempty"` + Res *types.CancelRecommendationResponse `xml:"CancelRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1085,7 +1185,7 @@ func CancelRecommendation(ctx context.Context, r soap.RoundTripper, req *types.C type CancelRetrievePropertiesExBody struct { Req *types.CancelRetrievePropertiesEx `xml:"urn:vim25 CancelRetrievePropertiesEx,omitempty"` - Res *types.CancelRetrievePropertiesExResponse `xml:"urn:vim25 CancelRetrievePropertiesExResponse,omitempty"` + Res *types.CancelRetrievePropertiesExResponse `xml:"CancelRetrievePropertiesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1105,7 +1205,7 @@ func CancelRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *t type CancelStorageDrsRecommendationBody struct { Req *types.CancelStorageDrsRecommendation `xml:"urn:vim25 CancelStorageDrsRecommendation,omitempty"` - Res *types.CancelStorageDrsRecommendationResponse `xml:"urn:vim25 CancelStorageDrsRecommendationResponse,omitempty"` + Res *types.CancelStorageDrsRecommendationResponse `xml:"CancelStorageDrsRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1125,7 +1225,7 @@ func CancelStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, re type CancelTaskBody struct { Req *types.CancelTask `xml:"urn:vim25 CancelTask,omitempty"` - Res *types.CancelTaskResponse `xml:"urn:vim25 CancelTaskResponse,omitempty"` + Res *types.CancelTaskResponse `xml:"CancelTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1145,7 +1245,7 @@ func CancelTask(ctx context.Context, r soap.RoundTripper, req *types.CancelTask) type CancelWaitForUpdatesBody struct { Req *types.CancelWaitForUpdates `xml:"urn:vim25 CancelWaitForUpdates,omitempty"` - Res *types.CancelWaitForUpdatesResponse `xml:"urn:vim25 CancelWaitForUpdatesResponse,omitempty"` + Res *types.CancelWaitForUpdatesResponse `xml:"CancelWaitForUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1165,7 +1265,7 @@ func CancelWaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.C type CertMgrRefreshCACertificatesAndCRLs_TaskBody struct { Req *types.CertMgrRefreshCACertificatesAndCRLs_Task `xml:"urn:vim25 
CertMgrRefreshCACertificatesAndCRLs_Task,omitempty"` - Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"` + Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1185,7 +1285,7 @@ func CertMgrRefreshCACertificatesAndCRLs_Task(ctx context.Context, r soap.RoundT type CertMgrRefreshCertificates_TaskBody struct { Req *types.CertMgrRefreshCertificates_Task `xml:"urn:vim25 CertMgrRefreshCertificates_Task,omitempty"` - Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"urn:vim25 CertMgrRefreshCertificates_TaskResponse,omitempty"` + Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"CertMgrRefreshCertificates_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1205,7 +1305,7 @@ func CertMgrRefreshCertificates_Task(ctx context.Context, r soap.RoundTripper, r type CertMgrRevokeCertificates_TaskBody struct { Req *types.CertMgrRevokeCertificates_Task `xml:"urn:vim25 CertMgrRevokeCertificates_Task,omitempty"` - Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"urn:vim25 CertMgrRevokeCertificates_TaskResponse,omitempty"` + Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"CertMgrRevokeCertificates_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1225,7 +1325,7 @@ func CertMgrRevokeCertificates_Task(ctx context.Context, r soap.RoundTripper, re type ChangeAccessModeBody struct { Req *types.ChangeAccessMode `xml:"urn:vim25 ChangeAccessMode,omitempty"` - Res *types.ChangeAccessModeResponse `xml:"urn:vim25 ChangeAccessModeResponse,omitempty"` + Res *types.ChangeAccessModeResponse `xml:"ChangeAccessModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1245,7 +1345,7 @@ func ChangeAccessMode(ctx context.Context, r soap.RoundTripper, req *types.Chang type ChangeFileAttributesInGuestBody struct { Req *types.ChangeFileAttributesInGuest `xml:"urn:vim25 ChangeFileAttributesInGuest,omitempty"` - Res *types.ChangeFileAttributesInGuestResponse `xml:"urn:vim25 ChangeFileAttributesInGuestResponse,omitempty"` + Res *types.ChangeFileAttributesInGuestResponse `xml:"ChangeFileAttributesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1265,7 +1365,7 @@ func ChangeFileAttributesInGuest(ctx context.Context, r soap.RoundTripper, req * type ChangeKey_TaskBody struct { Req *types.ChangeKey_Task `xml:"urn:vim25 ChangeKey_Task,omitempty"` - Res *types.ChangeKey_TaskResponse `xml:"urn:vim25 ChangeKey_TaskResponse,omitempty"` + Res *types.ChangeKey_TaskResponse `xml:"ChangeKey_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1285,7 +1385,7 @@ func ChangeKey_Task(ctx context.Context, r soap.RoundTripper, req *types.ChangeK type ChangeLockdownModeBody struct { Req *types.ChangeLockdownMode `xml:"urn:vim25 ChangeLockdownMode,omitempty"` - Res *types.ChangeLockdownModeResponse `xml:"urn:vim25 ChangeLockdownModeResponse,omitempty"` + Res *types.ChangeLockdownModeResponse `xml:"ChangeLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1305,7 +1405,7 @@ func 
ChangeLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.Cha type ChangeNFSUserPasswordBody struct { Req *types.ChangeNFSUserPassword `xml:"urn:vim25 ChangeNFSUserPassword,omitempty"` - Res *types.ChangeNFSUserPasswordResponse `xml:"urn:vim25 ChangeNFSUserPasswordResponse,omitempty"` + Res *types.ChangeNFSUserPasswordResponse `xml:"ChangeNFSUserPasswordResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1325,7 +1425,7 @@ func ChangeNFSUserPassword(ctx context.Context, r soap.RoundTripper, req *types. type ChangeOwnerBody struct { Req *types.ChangeOwner `xml:"urn:vim25 ChangeOwner,omitempty"` - Res *types.ChangeOwnerResponse `xml:"urn:vim25 ChangeOwnerResponse,omitempty"` + Res *types.ChangeOwnerResponse `xml:"ChangeOwnerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1343,9 +1443,29 @@ func ChangeOwner(ctx context.Context, r soap.RoundTripper, req *types.ChangeOwne return resBody.Res, nil } +type ChangePasswordBody struct { + Req *types.ChangePassword `xml:"urn:vim25 ChangePassword,omitempty"` + Res *types.ChangePasswordResponse `xml:"ChangePasswordResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangePasswordBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangePassword(ctx context.Context, r soap.RoundTripper, req *types.ChangePassword) (*types.ChangePasswordResponse, error) { + var reqBody, resBody ChangePasswordBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CheckAddHostEvc_TaskBody struct { Req *types.CheckAddHostEvc_Task `xml:"urn:vim25 CheckAddHostEvc_Task,omitempty"` - Res *types.CheckAddHostEvc_TaskResponse `xml:"urn:vim25 CheckAddHostEvc_TaskResponse,omitempty"` + Res *types.CheckAddHostEvc_TaskResponse `xml:"CheckAddHostEvc_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1365,7 +1485,7 @@ func CheckAddHostEvc_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CheckAnswerFileStatus_TaskBody struct { Req *types.CheckAnswerFileStatus_Task `xml:"urn:vim25 CheckAnswerFileStatus_Task,omitempty"` - Res *types.CheckAnswerFileStatus_TaskResponse `xml:"urn:vim25 CheckAnswerFileStatus_TaskResponse,omitempty"` + Res *types.CheckAnswerFileStatus_TaskResponse `xml:"CheckAnswerFileStatus_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1385,7 +1505,7 @@ func CheckAnswerFileStatus_Task(ctx context.Context, r soap.RoundTripper, req *t type CheckClone_TaskBody struct { Req *types.CheckClone_Task `xml:"urn:vim25 CheckClone_Task,omitempty"` - Res *types.CheckClone_TaskResponse `xml:"urn:vim25 CheckClone_TaskResponse,omitempty"` + Res *types.CheckClone_TaskResponse `xml:"CheckClone_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1405,7 +1525,7 @@ func CheckClone_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckC type CheckCompatibility_TaskBody struct { Req *types.CheckCompatibility_Task `xml:"urn:vim25 CheckCompatibility_Task,omitempty"` - Res *types.CheckCompatibility_TaskResponse `xml:"urn:vim25 CheckCompatibility_TaskResponse,omitempty"` + Res *types.CheckCompatibility_TaskResponse `xml:"CheckCompatibility_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1425,7 +1545,7 @@ func CheckCompatibility_Task(ctx context.Context, r soap.RoundTripper, req *type type CheckCompliance_TaskBody struct { Req *types.CheckCompliance_Task `xml:"urn:vim25 CheckCompliance_Task,omitempty"` - Res *types.CheckCompliance_TaskResponse `xml:"urn:vim25 CheckCompliance_TaskResponse,omitempty"` + Res *types.CheckCompliance_TaskResponse `xml:"CheckCompliance_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1445,7 +1565,7 @@ func CheckCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CheckConfigureEvcMode_TaskBody struct { Req *types.CheckConfigureEvcMode_Task `xml:"urn:vim25 CheckConfigureEvcMode_Task,omitempty"` - Res *types.CheckConfigureEvcMode_TaskResponse `xml:"urn:vim25 CheckConfigureEvcMode_TaskResponse,omitempty"` + Res *types.CheckConfigureEvcMode_TaskResponse `xml:"CheckConfigureEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1465,7 +1585,7 @@ func CheckConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *t type CheckCustomizationResourcesBody struct { Req *types.CheckCustomizationResources `xml:"urn:vim25 CheckCustomizationResources,omitempty"` - Res *types.CheckCustomizationResourcesResponse `xml:"urn:vim25 CheckCustomizationResourcesResponse,omitempty"` + Res *types.CheckCustomizationResourcesResponse `xml:"CheckCustomizationResourcesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1485,7 +1605,7 @@ func CheckCustomizationResources(ctx context.Context, r soap.RoundTripper, req * type CheckCustomizationSpecBody struct { Req *types.CheckCustomizationSpec `xml:"urn:vim25 CheckCustomizationSpec,omitempty"` - Res *types.CheckCustomizationSpecResponse `xml:"urn:vim25 CheckCustomizationSpecResponse,omitempty"` + Res *types.CheckCustomizationSpecResponse `xml:"CheckCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1505,7 +1625,7 @@ func CheckCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types type CheckForUpdatesBody struct { Req *types.CheckForUpdates `xml:"urn:vim25 CheckForUpdates,omitempty"` - Res *types.CheckForUpdatesResponse `xml:"urn:vim25 CheckForUpdatesResponse,omitempty"` + Res *types.CheckForUpdatesResponse `xml:"CheckForUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1525,7 +1645,7 @@ func CheckForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CheckF type CheckHostPatch_TaskBody struct { Req *types.CheckHostPatch_Task `xml:"urn:vim25 CheckHostPatch_Task,omitempty"` - Res *types.CheckHostPatch_TaskResponse `xml:"urn:vim25 CheckHostPatch_TaskResponse,omitempty"` + Res *types.CheckHostPatch_TaskResponse `xml:"CheckHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1545,7 +1665,7 @@ func CheckHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Ch type CheckInstantClone_TaskBody struct { Req *types.CheckInstantClone_Task `xml:"urn:vim25 CheckInstantClone_Task,omitempty"` - Res *types.CheckInstantClone_TaskResponse `xml:"urn:vim25 CheckInstantClone_TaskResponse,omitempty"` + Res *types.CheckInstantClone_TaskResponse `xml:"CheckInstantClone_TaskResponse,omitempty"` Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1565,7 +1685,7 @@ func CheckInstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types type CheckLicenseFeatureBody struct { Req *types.CheckLicenseFeature `xml:"urn:vim25 CheckLicenseFeature,omitempty"` - Res *types.CheckLicenseFeatureResponse `xml:"urn:vim25 CheckLicenseFeatureResponse,omitempty"` + Res *types.CheckLicenseFeatureResponse `xml:"CheckLicenseFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1585,7 +1705,7 @@ func CheckLicenseFeature(ctx context.Context, r soap.RoundTripper, req *types.Ch type CheckMigrate_TaskBody struct { Req *types.CheckMigrate_Task `xml:"urn:vim25 CheckMigrate_Task,omitempty"` - Res *types.CheckMigrate_TaskResponse `xml:"urn:vim25 CheckMigrate_TaskResponse,omitempty"` + Res *types.CheckMigrate_TaskResponse `xml:"CheckMigrate_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1605,7 +1725,7 @@ func CheckMigrate_Task(ctx context.Context, r soap.RoundTripper, req *types.Chec type CheckPowerOn_TaskBody struct { Req *types.CheckPowerOn_Task `xml:"urn:vim25 CheckPowerOn_Task,omitempty"` - Res *types.CheckPowerOn_TaskResponse `xml:"urn:vim25 CheckPowerOn_TaskResponse,omitempty"` + Res *types.CheckPowerOn_TaskResponse `xml:"CheckPowerOn_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1625,7 +1745,7 @@ func CheckPowerOn_Task(ctx context.Context, r soap.RoundTripper, req *types.Chec type CheckProfileCompliance_TaskBody struct { Req *types.CheckProfileCompliance_Task `xml:"urn:vim25 CheckProfileCompliance_Task,omitempty"` - Res *types.CheckProfileCompliance_TaskResponse `xml:"urn:vim25 CheckProfileCompliance_TaskResponse,omitempty"` + Res *types.CheckProfileCompliance_TaskResponse `xml:"CheckProfileCompliance_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1645,7 +1765,7 @@ func CheckProfileCompliance_Task(ctx context.Context, r soap.RoundTripper, req * type CheckRelocate_TaskBody struct { Req *types.CheckRelocate_Task `xml:"urn:vim25 CheckRelocate_Task,omitempty"` - Res *types.CheckRelocate_TaskResponse `xml:"urn:vim25 CheckRelocate_TaskResponse,omitempty"` + Res *types.CheckRelocate_TaskResponse `xml:"CheckRelocate_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1665,7 +1785,7 @@ func CheckRelocate_Task(ctx context.Context, r soap.RoundTripper, req *types.Che type CheckVmConfig_TaskBody struct { Req *types.CheckVmConfig_Task `xml:"urn:vim25 CheckVmConfig_Task,omitempty"` - Res *types.CheckVmConfig_TaskResponse `xml:"urn:vim25 CheckVmConfig_TaskResponse,omitempty"` + Res *types.CheckVmConfig_TaskResponse `xml:"CheckVmConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1685,7 +1805,7 @@ func CheckVmConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.Che type ClearComplianceStatusBody struct { Req *types.ClearComplianceStatus `xml:"urn:vim25 ClearComplianceStatus,omitempty"` - Res *types.ClearComplianceStatusResponse `xml:"urn:vim25 ClearComplianceStatusResponse,omitempty"` + Res *types.ClearComplianceStatusResponse `xml:"ClearComplianceStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ 
-1705,7 +1825,7 @@ func ClearComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types. type ClearNFSUserBody struct { Req *types.ClearNFSUser `xml:"urn:vim25 ClearNFSUser,omitempty"` - Res *types.ClearNFSUserResponse `xml:"urn:vim25 ClearNFSUserResponse,omitempty"` + Res *types.ClearNFSUserResponse `xml:"ClearNFSUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1725,7 +1845,7 @@ func ClearNFSUser(ctx context.Context, r soap.RoundTripper, req *types.ClearNFSU type ClearSystemEventLogBody struct { Req *types.ClearSystemEventLog `xml:"urn:vim25 ClearSystemEventLog,omitempty"` - Res *types.ClearSystemEventLogResponse `xml:"urn:vim25 ClearSystemEventLogResponse,omitempty"` + Res *types.ClearSystemEventLogResponse `xml:"ClearSystemEventLogResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1745,7 +1865,7 @@ func ClearSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.Cl type ClearTriggeredAlarmsBody struct { Req *types.ClearTriggeredAlarms `xml:"urn:vim25 ClearTriggeredAlarms,omitempty"` - Res *types.ClearTriggeredAlarmsResponse `xml:"urn:vim25 ClearTriggeredAlarmsResponse,omitempty"` + Res *types.ClearTriggeredAlarmsResponse `xml:"ClearTriggeredAlarmsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1765,7 +1885,7 @@ func ClearTriggeredAlarms(ctx context.Context, r soap.RoundTripper, req *types.C type ClearVStorageObjectControlFlagsBody struct { Req *types.ClearVStorageObjectControlFlags `xml:"urn:vim25 ClearVStorageObjectControlFlags,omitempty"` - Res *types.ClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 ClearVStorageObjectControlFlagsResponse,omitempty"` + Res *types.ClearVStorageObjectControlFlagsResponse `xml:"ClearVStorageObjectControlFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1785,7 +1905,7 @@ func ClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, r type CloneSessionBody struct { Req *types.CloneSession `xml:"urn:vim25 CloneSession,omitempty"` - Res *types.CloneSessionResponse `xml:"urn:vim25 CloneSessionResponse,omitempty"` + Res *types.CloneSessionResponse `xml:"CloneSessionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1805,7 +1925,7 @@ func CloneSession(ctx context.Context, r soap.RoundTripper, req *types.CloneSess type CloneVApp_TaskBody struct { Req *types.CloneVApp_Task `xml:"urn:vim25 CloneVApp_Task,omitempty"` - Res *types.CloneVApp_TaskResponse `xml:"urn:vim25 CloneVApp_TaskResponse,omitempty"` + Res *types.CloneVApp_TaskResponse `xml:"CloneVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1825,7 +1945,7 @@ func CloneVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVA type CloneVM_TaskBody struct { Req *types.CloneVM_Task `xml:"urn:vim25 CloneVM_Task,omitempty"` - Res *types.CloneVM_TaskResponse `xml:"urn:vim25 CloneVM_TaskResponse,omitempty"` + Res *types.CloneVM_TaskResponse `xml:"CloneVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1845,7 +1965,7 @@ func CloneVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVM_T type CloneVStorageObject_TaskBody struct { Req *types.CloneVStorageObject_Task `xml:"urn:vim25 
CloneVStorageObject_Task,omitempty"` - Res *types.CloneVStorageObject_TaskResponse `xml:"urn:vim25 CloneVStorageObject_TaskResponse,omitempty"` + Res *types.CloneVStorageObject_TaskResponse `xml:"CloneVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1865,7 +1985,7 @@ func CloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *typ type CloseInventoryViewFolderBody struct { Req *types.CloseInventoryViewFolder `xml:"urn:vim25 CloseInventoryViewFolder,omitempty"` - Res *types.CloseInventoryViewFolderResponse `xml:"urn:vim25 CloseInventoryViewFolderResponse,omitempty"` + Res *types.CloseInventoryViewFolderResponse `xml:"CloseInventoryViewFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1885,7 +2005,7 @@ func CloseInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *typ type ClusterEnterMaintenanceModeBody struct { Req *types.ClusterEnterMaintenanceMode `xml:"urn:vim25 ClusterEnterMaintenanceMode,omitempty"` - Res *types.ClusterEnterMaintenanceModeResponse `xml:"urn:vim25 ClusterEnterMaintenanceModeResponse,omitempty"` + Res *types.ClusterEnterMaintenanceModeResponse `xml:"ClusterEnterMaintenanceModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1905,7 +2025,7 @@ func ClusterEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req * type CompositeHostProfile_TaskBody struct { Req *types.CompositeHostProfile_Task `xml:"urn:vim25 CompositeHostProfile_Task,omitempty"` - Res *types.CompositeHostProfile_TaskResponse `xml:"urn:vim25 CompositeHostProfile_TaskResponse,omitempty"` + Res *types.CompositeHostProfile_TaskResponse `xml:"CompositeHostProfile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1925,7 +2045,7 @@ func CompositeHostProfile_Task(ctx context.Context, r soap.RoundTripper, req *ty type ComputeDiskPartitionInfoBody struct { Req *types.ComputeDiskPartitionInfo `xml:"urn:vim25 ComputeDiskPartitionInfo,omitempty"` - Res *types.ComputeDiskPartitionInfoResponse `xml:"urn:vim25 ComputeDiskPartitionInfoResponse,omitempty"` + Res *types.ComputeDiskPartitionInfoResponse `xml:"ComputeDiskPartitionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1945,7 +2065,7 @@ func ComputeDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *typ type ComputeDiskPartitionInfoForResizeBody struct { Req *types.ComputeDiskPartitionInfoForResize `xml:"urn:vim25 ComputeDiskPartitionInfoForResize,omitempty"` - Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"urn:vim25 ComputeDiskPartitionInfoForResizeResponse,omitempty"` + Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"ComputeDiskPartitionInfoForResizeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1965,7 +2085,7 @@ func ComputeDiskPartitionInfoForResize(ctx context.Context, r soap.RoundTripper, type ConfigureCryptoKeyBody struct { Req *types.ConfigureCryptoKey `xml:"urn:vim25 ConfigureCryptoKey,omitempty"` - Res *types.ConfigureCryptoKeyResponse `xml:"urn:vim25 ConfigureCryptoKeyResponse,omitempty"` + Res *types.ConfigureCryptoKeyResponse `xml:"ConfigureCryptoKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1985,7 +2105,7 @@ 
func ConfigureCryptoKey(ctx context.Context, r soap.RoundTripper, req *types.Con type ConfigureDatastoreIORM_TaskBody struct { Req *types.ConfigureDatastoreIORM_Task `xml:"urn:vim25 ConfigureDatastoreIORM_Task,omitempty"` - Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"urn:vim25 ConfigureDatastoreIORM_TaskResponse,omitempty"` + Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"ConfigureDatastoreIORM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2005,7 +2125,7 @@ func ConfigureDatastoreIORM_Task(ctx context.Context, r soap.RoundTripper, req * type ConfigureDatastorePrincipalBody struct { Req *types.ConfigureDatastorePrincipal `xml:"urn:vim25 ConfigureDatastorePrincipal,omitempty"` - Res *types.ConfigureDatastorePrincipalResponse `xml:"urn:vim25 ConfigureDatastorePrincipalResponse,omitempty"` + Res *types.ConfigureDatastorePrincipalResponse `xml:"ConfigureDatastorePrincipalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2025,7 +2145,7 @@ func ConfigureDatastorePrincipal(ctx context.Context, r soap.RoundTripper, req * type ConfigureEvcMode_TaskBody struct { Req *types.ConfigureEvcMode_Task `xml:"urn:vim25 ConfigureEvcMode_Task,omitempty"` - Res *types.ConfigureEvcMode_TaskResponse `xml:"urn:vim25 ConfigureEvcMode_TaskResponse,omitempty"` + Res *types.ConfigureEvcMode_TaskResponse `xml:"ConfigureEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2043,9 +2163,29 @@ func ConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types. return resBody.Res, nil } +type ConfigureHCI_TaskBody struct { + Req *types.ConfigureHCI_Task `xml:"urn:vim25 ConfigureHCI_Task,omitempty"` + Res *types.ConfigureHCI_TaskResponse `xml:"ConfigureHCI_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureHCI_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureHCI_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureHCI_Task) (*types.ConfigureHCI_TaskResponse, error) { + var reqBody, resBody ConfigureHCI_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ConfigureHostCache_TaskBody struct { Req *types.ConfigureHostCache_Task `xml:"urn:vim25 ConfigureHostCache_Task,omitempty"` - Res *types.ConfigureHostCache_TaskResponse `xml:"urn:vim25 ConfigureHostCache_TaskResponse,omitempty"` + Res *types.ConfigureHostCache_TaskResponse `xml:"ConfigureHostCache_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2065,7 +2205,7 @@ func ConfigureHostCache_Task(ctx context.Context, r soap.RoundTripper, req *type type ConfigureLicenseSourceBody struct { Req *types.ConfigureLicenseSource `xml:"urn:vim25 ConfigureLicenseSource,omitempty"` - Res *types.ConfigureLicenseSourceResponse `xml:"urn:vim25 ConfigureLicenseSourceResponse,omitempty"` + Res *types.ConfigureLicenseSourceResponse `xml:"ConfigureLicenseSourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2085,7 +2225,7 @@ func ConfigureLicenseSource(ctx context.Context, r soap.RoundTripper, req *types type ConfigurePowerPolicyBody struct { Req *types.ConfigurePowerPolicy `xml:"urn:vim25 ConfigurePowerPolicy,omitempty"` - Res 
*types.ConfigurePowerPolicyResponse `xml:"urn:vim25 ConfigurePowerPolicyResponse,omitempty"` + Res *types.ConfigurePowerPolicyResponse `xml:"ConfigurePowerPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2105,7 +2245,7 @@ func ConfigurePowerPolicy(ctx context.Context, r soap.RoundTripper, req *types.C type ConfigureStorageDrsForPod_TaskBody struct { Req *types.ConfigureStorageDrsForPod_Task `xml:"urn:vim25 ConfigureStorageDrsForPod_Task,omitempty"` - Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"urn:vim25 ConfigureStorageDrsForPod_TaskResponse,omitempty"` + Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"ConfigureStorageDrsForPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2125,7 +2265,7 @@ func ConfigureStorageDrsForPod_Task(ctx context.Context, r soap.RoundTripper, re type ConfigureVFlashResourceEx_TaskBody struct { Req *types.ConfigureVFlashResourceEx_Task `xml:"urn:vim25 ConfigureVFlashResourceEx_Task,omitempty"` - Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"urn:vim25 ConfigureVFlashResourceEx_TaskResponse,omitempty"` + Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"ConfigureVFlashResourceEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2143,9 +2283,49 @@ func ConfigureVFlashResourceEx_Task(ctx context.Context, r soap.RoundTripper, re return resBody.Res, nil } +type ConnectNvmeControllerBody struct { + Req *types.ConnectNvmeController `xml:"urn:vim25 ConnectNvmeController,omitempty"` + Res *types.ConnectNvmeControllerResponse `xml:"ConnectNvmeControllerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConnectNvmeControllerBody) Fault() *soap.Fault { return b.Fault_ } + +func ConnectNvmeController(ctx context.Context, r soap.RoundTripper, req *types.ConnectNvmeController) (*types.ConnectNvmeControllerResponse, error) { + var reqBody, resBody ConnectNvmeControllerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConnectNvmeControllerEx_TaskBody struct { + Req *types.ConnectNvmeControllerEx_Task `xml:"urn:vim25 ConnectNvmeControllerEx_Task,omitempty"` + Res *types.ConnectNvmeControllerEx_TaskResponse `xml:"ConnectNvmeControllerEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConnectNvmeControllerEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConnectNvmeControllerEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ConnectNvmeControllerEx_Task) (*types.ConnectNvmeControllerEx_TaskResponse, error) { + var reqBody, resBody ConnectNvmeControllerEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ConsolidateVMDisks_TaskBody struct { Req *types.ConsolidateVMDisks_Task `xml:"urn:vim25 ConsolidateVMDisks_Task,omitempty"` - Res *types.ConsolidateVMDisks_TaskResponse `xml:"urn:vim25 ConsolidateVMDisks_TaskResponse,omitempty"` + Res *types.ConsolidateVMDisks_TaskResponse `xml:"ConsolidateVMDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2165,7 +2345,7 @@ func ConsolidateVMDisks_Task(ctx context.Context, 
r soap.RoundTripper, req *type type ContinueRetrievePropertiesExBody struct { Req *types.ContinueRetrievePropertiesEx `xml:"urn:vim25 ContinueRetrievePropertiesEx,omitempty"` - Res *types.ContinueRetrievePropertiesExResponse `xml:"urn:vim25 ContinueRetrievePropertiesExResponse,omitempty"` + Res *types.ContinueRetrievePropertiesExResponse `xml:"ContinueRetrievePropertiesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2185,7 +2365,7 @@ func ContinueRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req type ConvertNamespacePathToUuidPathBody struct { Req *types.ConvertNamespacePathToUuidPath `xml:"urn:vim25 ConvertNamespacePathToUuidPath,omitempty"` - Res *types.ConvertNamespacePathToUuidPathResponse `xml:"urn:vim25 ConvertNamespacePathToUuidPathResponse,omitempty"` + Res *types.ConvertNamespacePathToUuidPathResponse `xml:"ConvertNamespacePathToUuidPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2205,7 +2385,7 @@ func ConvertNamespacePathToUuidPath(ctx context.Context, r soap.RoundTripper, re type CopyDatastoreFile_TaskBody struct { Req *types.CopyDatastoreFile_Task `xml:"urn:vim25 CopyDatastoreFile_Task,omitempty"` - Res *types.CopyDatastoreFile_TaskResponse `xml:"urn:vim25 CopyDatastoreFile_TaskResponse,omitempty"` + Res *types.CopyDatastoreFile_TaskResponse `xml:"CopyDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2225,7 +2405,7 @@ func CopyDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types type CopyVirtualDisk_TaskBody struct { Req *types.CopyVirtualDisk_Task `xml:"urn:vim25 CopyVirtualDisk_Task,omitempty"` - Res *types.CopyVirtualDisk_TaskResponse `xml:"urn:vim25 CopyVirtualDisk_TaskResponse,omitempty"` + Res *types.CopyVirtualDisk_TaskResponse `xml:"CopyVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2245,7 +2425,7 @@ func CopyVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CreateAlarmBody struct { Req *types.CreateAlarm `xml:"urn:vim25 CreateAlarm,omitempty"` - Res *types.CreateAlarmResponse `xml:"urn:vim25 CreateAlarmResponse,omitempty"` + Res *types.CreateAlarmResponse `xml:"CreateAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2265,7 +2445,7 @@ func CreateAlarm(ctx context.Context, r soap.RoundTripper, req *types.CreateAlar type CreateChildVM_TaskBody struct { Req *types.CreateChildVM_Task `xml:"urn:vim25 CreateChildVM_Task,omitempty"` - Res *types.CreateChildVM_TaskResponse `xml:"urn:vim25 CreateChildVM_TaskResponse,omitempty"` + Res *types.CreateChildVM_TaskResponse `xml:"CreateChildVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2285,7 +2465,7 @@ func CreateChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateClusterBody struct { Req *types.CreateCluster `xml:"urn:vim25 CreateCluster,omitempty"` - Res *types.CreateClusterResponse `xml:"urn:vim25 CreateClusterResponse,omitempty"` + Res *types.CreateClusterResponse `xml:"CreateClusterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2305,7 +2485,7 @@ func CreateCluster(ctx context.Context, r soap.RoundTripper, req *types.CreateCl type CreateClusterExBody 
struct { Req *types.CreateClusterEx `xml:"urn:vim25 CreateClusterEx,omitempty"` - Res *types.CreateClusterExResponse `xml:"urn:vim25 CreateClusterExResponse,omitempty"` + Res *types.CreateClusterExResponse `xml:"CreateClusterExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2325,7 +2505,7 @@ func CreateClusterEx(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateCollectorForEventsBody struct { Req *types.CreateCollectorForEvents `xml:"urn:vim25 CreateCollectorForEvents,omitempty"` - Res *types.CreateCollectorForEventsResponse `xml:"urn:vim25 CreateCollectorForEventsResponse,omitempty"` + Res *types.CreateCollectorForEventsResponse `xml:"CreateCollectorForEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2345,7 +2525,7 @@ func CreateCollectorForEvents(ctx context.Context, r soap.RoundTripper, req *typ type CreateCollectorForTasksBody struct { Req *types.CreateCollectorForTasks `xml:"urn:vim25 CreateCollectorForTasks,omitempty"` - Res *types.CreateCollectorForTasksResponse `xml:"urn:vim25 CreateCollectorForTasksResponse,omitempty"` + Res *types.CreateCollectorForTasksResponse `xml:"CreateCollectorForTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2365,7 +2545,7 @@ func CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *type type CreateContainerViewBody struct { Req *types.CreateContainerView `xml:"urn:vim25 CreateContainerView,omitempty"` - Res *types.CreateContainerViewResponse `xml:"urn:vim25 CreateContainerViewResponse,omitempty"` + Res *types.CreateContainerViewResponse `xml:"CreateContainerViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2385,7 +2565,7 @@ func CreateContainerView(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateCustomizationSpecBody struct { Req *types.CreateCustomizationSpec `xml:"urn:vim25 CreateCustomizationSpec,omitempty"` - Res *types.CreateCustomizationSpecResponse `xml:"urn:vim25 CreateCustomizationSpecResponse,omitempty"` + Res *types.CreateCustomizationSpecResponse `xml:"CreateCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2405,7 +2585,7 @@ func CreateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type CreateDVPortgroup_TaskBody struct { Req *types.CreateDVPortgroup_Task `xml:"urn:vim25 CreateDVPortgroup_Task,omitempty"` - Res *types.CreateDVPortgroup_TaskResponse `xml:"urn:vim25 CreateDVPortgroup_TaskResponse,omitempty"` + Res *types.CreateDVPortgroup_TaskResponse `xml:"CreateDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2425,7 +2605,7 @@ func CreateDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateDVS_TaskBody struct { Req *types.CreateDVS_Task `xml:"urn:vim25 CreateDVS_Task,omitempty"` - Res *types.CreateDVS_TaskResponse `xml:"urn:vim25 CreateDVS_TaskResponse,omitempty"` + Res *types.CreateDVS_TaskResponse `xml:"CreateDVS_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2445,7 +2625,7 @@ func CreateDVS_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateD type CreateDatacenterBody struct { Req *types.CreateDatacenter `xml:"urn:vim25 
CreateDatacenter,omitempty"` - Res *types.CreateDatacenterResponse `xml:"urn:vim25 CreateDatacenterResponse,omitempty"` + Res *types.CreateDatacenterResponse `xml:"CreateDatacenterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2465,7 +2645,7 @@ func CreateDatacenter(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateDefaultProfileBody struct { Req *types.CreateDefaultProfile `xml:"urn:vim25 CreateDefaultProfile,omitempty"` - Res *types.CreateDefaultProfileResponse `xml:"urn:vim25 CreateDefaultProfileResponse,omitempty"` + Res *types.CreateDefaultProfileResponse `xml:"CreateDefaultProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2485,7 +2665,7 @@ func CreateDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.C type CreateDescriptorBody struct { Req *types.CreateDescriptor `xml:"urn:vim25 CreateDescriptor,omitempty"` - Res *types.CreateDescriptorResponse `xml:"urn:vim25 CreateDescriptorResponse,omitempty"` + Res *types.CreateDescriptorResponse `xml:"CreateDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2505,7 +2685,7 @@ func CreateDescriptor(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateDiagnosticPartitionBody struct { Req *types.CreateDiagnosticPartition `xml:"urn:vim25 CreateDiagnosticPartition,omitempty"` - Res *types.CreateDiagnosticPartitionResponse `xml:"urn:vim25 CreateDiagnosticPartitionResponse,omitempty"` + Res *types.CreateDiagnosticPartitionResponse `xml:"CreateDiagnosticPartitionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2525,7 +2705,7 @@ func CreateDiagnosticPartition(ctx context.Context, r soap.RoundTripper, req *ty type CreateDirectoryBody struct { Req *types.CreateDirectory `xml:"urn:vim25 CreateDirectory,omitempty"` - Res *types.CreateDirectoryResponse `xml:"urn:vim25 CreateDirectoryResponse,omitempty"` + Res *types.CreateDirectoryResponse `xml:"CreateDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2545,7 +2725,7 @@ func CreateDirectory(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateDiskFromSnapshot_TaskBody struct { Req *types.CreateDiskFromSnapshot_Task `xml:"urn:vim25 CreateDiskFromSnapshot_Task,omitempty"` - Res *types.CreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 CreateDiskFromSnapshot_TaskResponse,omitempty"` + Res *types.CreateDiskFromSnapshot_TaskResponse `xml:"CreateDiskFromSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2565,7 +2745,7 @@ func CreateDiskFromSnapshot_Task(ctx context.Context, r soap.RoundTripper, req * type CreateDisk_TaskBody struct { Req *types.CreateDisk_Task `xml:"urn:vim25 CreateDisk_Task,omitempty"` - Res *types.CreateDisk_TaskResponse `xml:"urn:vim25 CreateDisk_TaskResponse,omitempty"` + Res *types.CreateDisk_TaskResponse `xml:"CreateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2585,7 +2765,7 @@ func CreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateFilterBody struct { Req *types.CreateFilter `xml:"urn:vim25 CreateFilter,omitempty"` - Res *types.CreateFilterResponse `xml:"urn:vim25 CreateFilterResponse,omitempty"` + Res 
*types.CreateFilterResponse `xml:"CreateFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2605,7 +2785,7 @@ func CreateFilter(ctx context.Context, r soap.RoundTripper, req *types.CreateFil type CreateFolderBody struct { Req *types.CreateFolder `xml:"urn:vim25 CreateFolder,omitempty"` - Res *types.CreateFolderResponse `xml:"urn:vim25 CreateFolderResponse,omitempty"` + Res *types.CreateFolderResponse `xml:"CreateFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2625,7 +2805,7 @@ func CreateFolder(ctx context.Context, r soap.RoundTripper, req *types.CreateFol type CreateGroupBody struct { Req *types.CreateGroup `xml:"urn:vim25 CreateGroup,omitempty"` - Res *types.CreateGroupResponse `xml:"urn:vim25 CreateGroupResponse,omitempty"` + Res *types.CreateGroupResponse `xml:"CreateGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2645,7 +2825,7 @@ func CreateGroup(ctx context.Context, r soap.RoundTripper, req *types.CreateGrou type CreateImportSpecBody struct { Req *types.CreateImportSpec `xml:"urn:vim25 CreateImportSpec,omitempty"` - Res *types.CreateImportSpecResponse `xml:"urn:vim25 CreateImportSpecResponse,omitempty"` + Res *types.CreateImportSpecResponse `xml:"CreateImportSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2665,7 +2845,7 @@ func CreateImportSpec(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateInventoryViewBody struct { Req *types.CreateInventoryView `xml:"urn:vim25 CreateInventoryView,omitempty"` - Res *types.CreateInventoryViewResponse `xml:"urn:vim25 CreateInventoryViewResponse,omitempty"` + Res *types.CreateInventoryViewResponse `xml:"CreateInventoryViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2685,7 +2865,7 @@ func CreateInventoryView(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateIpPoolBody struct { Req *types.CreateIpPool `xml:"urn:vim25 CreateIpPool,omitempty"` - Res *types.CreateIpPoolResponse `xml:"urn:vim25 CreateIpPoolResponse,omitempty"` + Res *types.CreateIpPoolResponse `xml:"CreateIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2705,7 +2885,7 @@ func CreateIpPool(ctx context.Context, r soap.RoundTripper, req *types.CreateIpP type CreateListViewBody struct { Req *types.CreateListView `xml:"urn:vim25 CreateListView,omitempty"` - Res *types.CreateListViewResponse `xml:"urn:vim25 CreateListViewResponse,omitempty"` + Res *types.CreateListViewResponse `xml:"CreateListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2725,7 +2905,7 @@ func CreateListView(ctx context.Context, r soap.RoundTripper, req *types.CreateL type CreateListViewFromViewBody struct { Req *types.CreateListViewFromView `xml:"urn:vim25 CreateListViewFromView,omitempty"` - Res *types.CreateListViewFromViewResponse `xml:"urn:vim25 CreateListViewFromViewResponse,omitempty"` + Res *types.CreateListViewFromViewResponse `xml:"CreateListViewFromViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2745,7 +2925,7 @@ func CreateListViewFromView(ctx context.Context, r soap.RoundTripper, req *types type CreateLocalDatastoreBody struct { Req 
*types.CreateLocalDatastore `xml:"urn:vim25 CreateLocalDatastore,omitempty"` - Res *types.CreateLocalDatastoreResponse `xml:"urn:vim25 CreateLocalDatastoreResponse,omitempty"` + Res *types.CreateLocalDatastoreResponse `xml:"CreateLocalDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2765,7 +2945,7 @@ func CreateLocalDatastore(ctx context.Context, r soap.RoundTripper, req *types.C type CreateNasDatastoreBody struct { Req *types.CreateNasDatastore `xml:"urn:vim25 CreateNasDatastore,omitempty"` - Res *types.CreateNasDatastoreResponse `xml:"urn:vim25 CreateNasDatastoreResponse,omitempty"` + Res *types.CreateNasDatastoreResponse `xml:"CreateNasDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2785,7 +2965,7 @@ func CreateNasDatastore(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateNvdimmNamespace_TaskBody struct { Req *types.CreateNvdimmNamespace_Task `xml:"urn:vim25 CreateNvdimmNamespace_Task,omitempty"` - Res *types.CreateNvdimmNamespace_TaskResponse `xml:"urn:vim25 CreateNvdimmNamespace_TaskResponse,omitempty"` + Res *types.CreateNvdimmNamespace_TaskResponse `xml:"CreateNvdimmNamespace_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2803,9 +2983,49 @@ func CreateNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type CreateNvdimmPMemNamespace_TaskBody struct { + Req *types.CreateNvdimmPMemNamespace_Task `xml:"urn:vim25 CreateNvdimmPMemNamespace_Task,omitempty"` + Res *types.CreateNvdimmPMemNamespace_TaskResponse `xml:"CreateNvdimmPMemNamespace_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNvdimmPMemNamespace_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNvdimmPMemNamespace_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateNvdimmPMemNamespace_Task) (*types.CreateNvdimmPMemNamespace_TaskResponse, error) { + var reqBody, resBody CreateNvdimmPMemNamespace_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateNvmeOverRdmaAdapterBody struct { + Req *types.CreateNvmeOverRdmaAdapter `xml:"urn:vim25 CreateNvmeOverRdmaAdapter,omitempty"` + Res *types.CreateNvmeOverRdmaAdapterResponse `xml:"CreateNvmeOverRdmaAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNvmeOverRdmaAdapterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNvmeOverRdmaAdapter(ctx context.Context, r soap.RoundTripper, req *types.CreateNvmeOverRdmaAdapter) (*types.CreateNvmeOverRdmaAdapterResponse, error) { + var reqBody, resBody CreateNvmeOverRdmaAdapterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CreateObjectScheduledTaskBody struct { Req *types.CreateObjectScheduledTask `xml:"urn:vim25 CreateObjectScheduledTask,omitempty"` - Res *types.CreateObjectScheduledTaskResponse `xml:"urn:vim25 CreateObjectScheduledTaskResponse,omitempty"` + Res *types.CreateObjectScheduledTaskResponse `xml:"CreateObjectScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2825,7 +3045,7 @@ func 
CreateObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *ty type CreatePerfIntervalBody struct { Req *types.CreatePerfInterval `xml:"urn:vim25 CreatePerfInterval,omitempty"` - Res *types.CreatePerfIntervalResponse `xml:"urn:vim25 CreatePerfIntervalResponse,omitempty"` + Res *types.CreatePerfIntervalResponse `xml:"CreatePerfIntervalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2845,7 +3065,7 @@ func CreatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateProfileBody struct { Req *types.CreateProfile `xml:"urn:vim25 CreateProfile,omitempty"` - Res *types.CreateProfileResponse `xml:"urn:vim25 CreateProfileResponse,omitempty"` + Res *types.CreateProfileResponse `xml:"CreateProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2865,7 +3085,7 @@ func CreateProfile(ctx context.Context, r soap.RoundTripper, req *types.CreatePr type CreatePropertyCollectorBody struct { Req *types.CreatePropertyCollector `xml:"urn:vim25 CreatePropertyCollector,omitempty"` - Res *types.CreatePropertyCollectorResponse `xml:"urn:vim25 CreatePropertyCollectorResponse,omitempty"` + Res *types.CreatePropertyCollectorResponse `xml:"CreatePropertyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2885,7 +3105,7 @@ func CreatePropertyCollector(ctx context.Context, r soap.RoundTripper, req *type type CreateRegistryKeyInGuestBody struct { Req *types.CreateRegistryKeyInGuest `xml:"urn:vim25 CreateRegistryKeyInGuest,omitempty"` - Res *types.CreateRegistryKeyInGuestResponse `xml:"urn:vim25 CreateRegistryKeyInGuestResponse,omitempty"` + Res *types.CreateRegistryKeyInGuestResponse `xml:"CreateRegistryKeyInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2905,7 +3125,7 @@ func CreateRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *typ type CreateResourcePoolBody struct { Req *types.CreateResourcePool `xml:"urn:vim25 CreateResourcePool,omitempty"` - Res *types.CreateResourcePoolResponse `xml:"urn:vim25 CreateResourcePoolResponse,omitempty"` + Res *types.CreateResourcePoolResponse `xml:"CreateResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2925,7 +3145,7 @@ func CreateResourcePool(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateScheduledTaskBody struct { Req *types.CreateScheduledTask `xml:"urn:vim25 CreateScheduledTask,omitempty"` - Res *types.CreateScheduledTaskResponse `xml:"urn:vim25 CreateScheduledTaskResponse,omitempty"` + Res *types.CreateScheduledTaskResponse `xml:"CreateScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2945,7 +3165,7 @@ func CreateScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateScreenshot_TaskBody struct { Req *types.CreateScreenshot_Task `xml:"urn:vim25 CreateScreenshot_Task,omitempty"` - Res *types.CreateScreenshot_TaskResponse `xml:"urn:vim25 CreateScreenshot_TaskResponse,omitempty"` + Res *types.CreateScreenshot_TaskResponse `xml:"CreateScreenshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2965,7 +3185,7 @@ func CreateScreenshot_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type CreateSecondaryVMEx_TaskBody struct { Req *types.CreateSecondaryVMEx_Task `xml:"urn:vim25 CreateSecondaryVMEx_Task,omitempty"` - Res *types.CreateSecondaryVMEx_TaskResponse `xml:"urn:vim25 CreateSecondaryVMEx_TaskResponse,omitempty"` + Res *types.CreateSecondaryVMEx_TaskResponse `xml:"CreateSecondaryVMEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2985,7 +3205,7 @@ func CreateSecondaryVMEx_Task(ctx context.Context, r soap.RoundTripper, req *typ type CreateSecondaryVM_TaskBody struct { Req *types.CreateSecondaryVM_Task `xml:"urn:vim25 CreateSecondaryVM_Task,omitempty"` - Res *types.CreateSecondaryVM_TaskResponse `xml:"urn:vim25 CreateSecondaryVM_TaskResponse,omitempty"` + Res *types.CreateSecondaryVM_TaskResponse `xml:"CreateSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3005,7 +3225,7 @@ func CreateSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateSnapshotEx_TaskBody struct { Req *types.CreateSnapshotEx_Task `xml:"urn:vim25 CreateSnapshotEx_Task,omitempty"` - Res *types.CreateSnapshotEx_TaskResponse `xml:"urn:vim25 CreateSnapshotEx_TaskResponse,omitempty"` + Res *types.CreateSnapshotEx_TaskResponse `xml:"CreateSnapshotEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3025,7 +3245,7 @@ func CreateSnapshotEx_Task(ctx context.Context, r soap.RoundTripper, req *types. type CreateSnapshot_TaskBody struct { Req *types.CreateSnapshot_Task `xml:"urn:vim25 CreateSnapshot_Task,omitempty"` - Res *types.CreateSnapshot_TaskResponse `xml:"urn:vim25 CreateSnapshot_TaskResponse,omitempty"` + Res *types.CreateSnapshot_TaskResponse `xml:"CreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3043,9 +3263,29 @@ func CreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.Cr return resBody.Res, nil } +type CreateSoftwareAdapterBody struct { + Req *types.CreateSoftwareAdapter `xml:"urn:vim25 CreateSoftwareAdapter,omitempty"` + Res *types.CreateSoftwareAdapterResponse `xml:"CreateSoftwareAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateSoftwareAdapterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSoftwareAdapter(ctx context.Context, r soap.RoundTripper, req *types.CreateSoftwareAdapter) (*types.CreateSoftwareAdapterResponse, error) { + var reqBody, resBody CreateSoftwareAdapterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CreateStoragePodBody struct { Req *types.CreateStoragePod `xml:"urn:vim25 CreateStoragePod,omitempty"` - Res *types.CreateStoragePodResponse `xml:"urn:vim25 CreateStoragePodResponse,omitempty"` + Res *types.CreateStoragePodResponse `xml:"CreateStoragePodResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3065,7 +3305,7 @@ func CreateStoragePod(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateTaskBody struct { Req *types.CreateTask `xml:"urn:vim25 CreateTask,omitempty"` - Res *types.CreateTaskResponse `xml:"urn:vim25 CreateTaskResponse,omitempty"` + Res *types.CreateTaskResponse `xml:"CreateTaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3085,7 +3325,7 @@ func CreateTask(ctx context.Context, r soap.RoundTripper, req *types.CreateTask) type CreateTemporaryDirectoryInGuestBody struct { Req *types.CreateTemporaryDirectoryInGuest `xml:"urn:vim25 CreateTemporaryDirectoryInGuest,omitempty"` - Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"urn:vim25 CreateTemporaryDirectoryInGuestResponse,omitempty"` + Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"CreateTemporaryDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3105,7 +3345,7 @@ func CreateTemporaryDirectoryInGuest(ctx context.Context, r soap.RoundTripper, r type CreateTemporaryFileInGuestBody struct { Req *types.CreateTemporaryFileInGuest `xml:"urn:vim25 CreateTemporaryFileInGuest,omitempty"` - Res *types.CreateTemporaryFileInGuestResponse `xml:"urn:vim25 CreateTemporaryFileInGuestResponse,omitempty"` + Res *types.CreateTemporaryFileInGuestResponse `xml:"CreateTemporaryFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3125,7 +3365,7 @@ func CreateTemporaryFileInGuest(ctx context.Context, r soap.RoundTripper, req *t type CreateUserBody struct { Req *types.CreateUser `xml:"urn:vim25 CreateUser,omitempty"` - Res *types.CreateUserResponse `xml:"urn:vim25 CreateUserResponse,omitempty"` + Res *types.CreateUserResponse `xml:"CreateUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3145,7 +3385,7 @@ func CreateUser(ctx context.Context, r soap.RoundTripper, req *types.CreateUser) type CreateVAppBody struct { Req *types.CreateVApp `xml:"urn:vim25 CreateVApp,omitempty"` - Res *types.CreateVAppResponse `xml:"urn:vim25 CreateVAppResponse,omitempty"` + Res *types.CreateVAppResponse `xml:"CreateVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3165,7 +3405,7 @@ func CreateVApp(ctx context.Context, r soap.RoundTripper, req *types.CreateVApp) type CreateVM_TaskBody struct { Req *types.CreateVM_Task `xml:"urn:vim25 CreateVM_Task,omitempty"` - Res *types.CreateVM_TaskResponse `xml:"urn:vim25 CreateVM_TaskResponse,omitempty"` + Res *types.CreateVM_TaskResponse `xml:"CreateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3185,7 +3425,7 @@ func CreateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVM type CreateVirtualDisk_TaskBody struct { Req *types.CreateVirtualDisk_Task `xml:"urn:vim25 CreateVirtualDisk_Task,omitempty"` - Res *types.CreateVirtualDisk_TaskResponse `xml:"urn:vim25 CreateVirtualDisk_TaskResponse,omitempty"` + Res *types.CreateVirtualDisk_TaskResponse `xml:"CreateVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3205,7 +3445,7 @@ func CreateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateVmfsDatastoreBody struct { Req *types.CreateVmfsDatastore `xml:"urn:vim25 CreateVmfsDatastore,omitempty"` - Res *types.CreateVmfsDatastoreResponse `xml:"urn:vim25 CreateVmfsDatastoreResponse,omitempty"` + Res *types.CreateVmfsDatastoreResponse `xml:"CreateVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3225,7 +3465,7 @@ func CreateVmfsDatastore(ctx 
context.Context, r soap.RoundTripper, req *types.Cr type CreateVvolDatastoreBody struct { Req *types.CreateVvolDatastore `xml:"urn:vim25 CreateVvolDatastore,omitempty"` - Res *types.CreateVvolDatastoreResponse `xml:"urn:vim25 CreateVvolDatastoreResponse,omitempty"` + Res *types.CreateVvolDatastoreResponse `xml:"CreateVvolDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3243,9 +3483,29 @@ func CreateVvolDatastore(ctx context.Context, r soap.RoundTripper, req *types.Cr return resBody.Res, nil } +type CryptoManagerHostDisableBody struct { + Req *types.CryptoManagerHostDisable `xml:"urn:vim25 CryptoManagerHostDisable,omitempty"` + Res *types.CryptoManagerHostDisableResponse `xml:"CryptoManagerHostDisableResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CryptoManagerHostDisableBody) Fault() *soap.Fault { return b.Fault_ } + +func CryptoManagerHostDisable(ctx context.Context, r soap.RoundTripper, req *types.CryptoManagerHostDisable) (*types.CryptoManagerHostDisableResponse, error) { + var reqBody, resBody CryptoManagerHostDisableBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CryptoManagerHostEnableBody struct { Req *types.CryptoManagerHostEnable `xml:"urn:vim25 CryptoManagerHostEnable,omitempty"` - Res *types.CryptoManagerHostEnableResponse `xml:"urn:vim25 CryptoManagerHostEnableResponse,omitempty"` + Res *types.CryptoManagerHostEnableResponse `xml:"CryptoManagerHostEnableResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3265,7 +3525,7 @@ func CryptoManagerHostEnable(ctx context.Context, r soap.RoundTripper, req *type type CryptoManagerHostPrepareBody struct { Req *types.CryptoManagerHostPrepare `xml:"urn:vim25 CryptoManagerHostPrepare,omitempty"` - Res *types.CryptoManagerHostPrepareResponse `xml:"urn:vim25 CryptoManagerHostPrepareResponse,omitempty"` + Res *types.CryptoManagerHostPrepareResponse `xml:"CryptoManagerHostPrepareResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3285,7 +3545,7 @@ func CryptoManagerHostPrepare(ctx context.Context, r soap.RoundTripper, req *typ type CryptoUnlock_TaskBody struct { Req *types.CryptoUnlock_Task `xml:"urn:vim25 CryptoUnlock_Task,omitempty"` - Res *types.CryptoUnlock_TaskResponse `xml:"urn:vim25 CryptoUnlock_TaskResponse,omitempty"` + Res *types.CryptoUnlock_TaskResponse `xml:"CryptoUnlock_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3305,7 +3565,7 @@ func CryptoUnlock_Task(ctx context.Context, r soap.RoundTripper, req *types.Cryp type CurrentTimeBody struct { Req *types.CurrentTime `xml:"urn:vim25 CurrentTime,omitempty"` - Res *types.CurrentTimeResponse `xml:"urn:vim25 CurrentTimeResponse,omitempty"` + Res *types.CurrentTimeResponse `xml:"CurrentTimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3325,7 +3585,7 @@ func CurrentTime(ctx context.Context, r soap.RoundTripper, req *types.CurrentTim type CustomizationSpecItemToXmlBody struct { Req *types.CustomizationSpecItemToXml `xml:"urn:vim25 CustomizationSpecItemToXml,omitempty"` - Res *types.CustomizationSpecItemToXmlResponse `xml:"urn:vim25 CustomizationSpecItemToXmlResponse,omitempty"` + Res 
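All of these wrappers share the same calling convention: fill in the request struct from vim25/types, pass any soap.RoundTripper (typically a connected *vim25.Client), and read the typed response. A usage sketch against CurrentTime, the simplest method touched here; the endpoint URL and credentials are placeholders and errors just panic for brevity:

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	ctx := context.Background()

	u, _ := url.Parse("https://user:pass@vcenter.example.com/sdk") // placeholder endpoint
	c, err := govmomi.NewClient(ctx, u, true)                      // true: skip TLS verification (lab use only)
	if err != nil {
		panic(err)
	}
	defer c.Logout(ctx)

	// The ServiceInstance singleton is the "_this" receiver for CurrentTime.
	req := types.CurrentTime{
		This: types.ManagedObjectReference{Type: "ServiceInstance", Value: "ServiceInstance"},
	}

	res, err := methods.CurrentTime(ctx, c.Client, &req)
	if err != nil {
		panic(err)
	}
	fmt.Println("server time:", res.Returnval)
}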
*types.CustomizationSpecItemToXmlResponse `xml:"CustomizationSpecItemToXmlResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3343,9 +3603,29 @@ func CustomizationSpecItemToXml(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type CustomizeGuest_TaskBody struct { + Req *types.CustomizeGuest_Task `xml:"urn:vim25 CustomizeGuest_Task,omitempty"` + Res *types.CustomizeGuest_TaskResponse `xml:"CustomizeGuest_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CustomizeGuest_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CustomizeGuest_Task(ctx context.Context, r soap.RoundTripper, req *types.CustomizeGuest_Task) (*types.CustomizeGuest_TaskResponse, error) { + var reqBody, resBody CustomizeGuest_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CustomizeVM_TaskBody struct { Req *types.CustomizeVM_Task `xml:"urn:vim25 CustomizeVM_Task,omitempty"` - Res *types.CustomizeVM_TaskResponse `xml:"urn:vim25 CustomizeVM_TaskResponse,omitempty"` + Res *types.CustomizeVM_TaskResponse `xml:"CustomizeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3365,7 +3645,7 @@ func CustomizeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Custo type DVPortgroupRollback_TaskBody struct { Req *types.DVPortgroupRollback_Task `xml:"urn:vim25 DVPortgroupRollback_Task,omitempty"` - Res *types.DVPortgroupRollback_TaskResponse `xml:"urn:vim25 DVPortgroupRollback_TaskResponse,omitempty"` + Res *types.DVPortgroupRollback_TaskResponse `xml:"DVPortgroupRollback_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3385,7 +3665,7 @@ func DVPortgroupRollback_Task(ctx context.Context, r soap.RoundTripper, req *typ type DVSManagerExportEntity_TaskBody struct { Req *types.DVSManagerExportEntity_Task `xml:"urn:vim25 DVSManagerExportEntity_Task,omitempty"` - Res *types.DVSManagerExportEntity_TaskResponse `xml:"urn:vim25 DVSManagerExportEntity_TaskResponse,omitempty"` + Res *types.DVSManagerExportEntity_TaskResponse `xml:"DVSManagerExportEntity_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3405,7 +3685,7 @@ func DVSManagerExportEntity_Task(ctx context.Context, r soap.RoundTripper, req * type DVSManagerImportEntity_TaskBody struct { Req *types.DVSManagerImportEntity_Task `xml:"urn:vim25 DVSManagerImportEntity_Task,omitempty"` - Res *types.DVSManagerImportEntity_TaskResponse `xml:"urn:vim25 DVSManagerImportEntity_TaskResponse,omitempty"` + Res *types.DVSManagerImportEntity_TaskResponse `xml:"DVSManagerImportEntity_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3425,7 +3705,7 @@ func DVSManagerImportEntity_Task(ctx context.Context, r soap.RoundTripper, req * type DVSManagerLookupDvPortGroupBody struct { Req *types.DVSManagerLookupDvPortGroup `xml:"urn:vim25 DVSManagerLookupDvPortGroup,omitempty"` - Res *types.DVSManagerLookupDvPortGroupResponse `xml:"urn:vim25 DVSManagerLookupDvPortGroupResponse,omitempty"` + Res *types.DVSManagerLookupDvPortGroupResponse `xml:"DVSManagerLookupDvPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` 
} @@ -3445,7 +3725,7 @@ func DVSManagerLookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req * type DVSRollback_TaskBody struct { Req *types.DVSRollback_Task `xml:"urn:vim25 DVSRollback_Task,omitempty"` - Res *types.DVSRollback_TaskResponse `xml:"urn:vim25 DVSRollback_TaskResponse,omitempty"` + Res *types.DVSRollback_TaskResponse `xml:"DVSRollback_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3465,7 +3745,7 @@ func DVSRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSRo type DatastoreEnterMaintenanceModeBody struct { Req *types.DatastoreEnterMaintenanceMode `xml:"urn:vim25 DatastoreEnterMaintenanceMode,omitempty"` - Res *types.DatastoreEnterMaintenanceModeResponse `xml:"urn:vim25 DatastoreEnterMaintenanceModeResponse,omitempty"` + Res *types.DatastoreEnterMaintenanceModeResponse `xml:"DatastoreEnterMaintenanceModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3485,7 +3765,7 @@ func DatastoreEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req type DatastoreExitMaintenanceMode_TaskBody struct { Req *types.DatastoreExitMaintenanceMode_Task `xml:"urn:vim25 DatastoreExitMaintenanceMode_Task,omitempty"` - Res *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"urn:vim25 DatastoreExitMaintenanceMode_TaskResponse,omitempty"` + Res *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"DatastoreExitMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3505,7 +3785,7 @@ func DatastoreExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, type DecodeLicenseBody struct { Req *types.DecodeLicense `xml:"urn:vim25 DecodeLicense,omitempty"` - Res *types.DecodeLicenseResponse `xml:"urn:vim25 DecodeLicenseResponse,omitempty"` + Res *types.DecodeLicenseResponse `xml:"DecodeLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3525,7 +3805,7 @@ func DecodeLicense(ctx context.Context, r soap.RoundTripper, req *types.DecodeLi type DefragmentAllDisksBody struct { Req *types.DefragmentAllDisks `xml:"urn:vim25 DefragmentAllDisks,omitempty"` - Res *types.DefragmentAllDisksResponse `xml:"urn:vim25 DefragmentAllDisksResponse,omitempty"` + Res *types.DefragmentAllDisksResponse `xml:"DefragmentAllDisksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3545,7 +3825,7 @@ func DefragmentAllDisks(ctx context.Context, r soap.RoundTripper, req *types.Def type DefragmentVirtualDisk_TaskBody struct { Req *types.DefragmentVirtualDisk_Task `xml:"urn:vim25 DefragmentVirtualDisk_Task,omitempty"` - Res *types.DefragmentVirtualDisk_TaskResponse `xml:"urn:vim25 DefragmentVirtualDisk_TaskResponse,omitempty"` + Res *types.DefragmentVirtualDisk_TaskResponse `xml:"DefragmentVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3565,7 +3845,7 @@ func DefragmentVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *t type DeleteCustomizationSpecBody struct { Req *types.DeleteCustomizationSpec `xml:"urn:vim25 DeleteCustomizationSpec,omitempty"` - Res *types.DeleteCustomizationSpecResponse `xml:"urn:vim25 DeleteCustomizationSpecResponse,omitempty"` + Res *types.DeleteCustomizationSpecResponse `xml:"DeleteCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3585,7 +3865,7 @@ func DeleteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type DeleteDatastoreFile_TaskBody struct { Req *types.DeleteDatastoreFile_Task `xml:"urn:vim25 DeleteDatastoreFile_Task,omitempty"` - Res *types.DeleteDatastoreFile_TaskResponse `xml:"urn:vim25 DeleteDatastoreFile_TaskResponse,omitempty"` + Res *types.DeleteDatastoreFile_TaskResponse `xml:"DeleteDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3605,7 +3885,7 @@ func DeleteDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *typ type DeleteDirectoryBody struct { Req *types.DeleteDirectory `xml:"urn:vim25 DeleteDirectory,omitempty"` - Res *types.DeleteDirectoryResponse `xml:"urn:vim25 DeleteDirectoryResponse,omitempty"` + Res *types.DeleteDirectoryResponse `xml:"DeleteDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3625,7 +3905,7 @@ func DeleteDirectory(ctx context.Context, r soap.RoundTripper, req *types.Delete type DeleteDirectoryInGuestBody struct { Req *types.DeleteDirectoryInGuest `xml:"urn:vim25 DeleteDirectoryInGuest,omitempty"` - Res *types.DeleteDirectoryInGuestResponse `xml:"urn:vim25 DeleteDirectoryInGuestResponse,omitempty"` + Res *types.DeleteDirectoryInGuestResponse `xml:"DeleteDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3645,7 +3925,7 @@ func DeleteDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types type DeleteFileBody struct { Req *types.DeleteFile `xml:"urn:vim25 DeleteFile,omitempty"` - Res *types.DeleteFileResponse `xml:"urn:vim25 DeleteFileResponse,omitempty"` + Res *types.DeleteFileResponse `xml:"DeleteFileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3665,7 +3945,7 @@ func DeleteFile(ctx context.Context, r soap.RoundTripper, req *types.DeleteFile) type DeleteFileInGuestBody struct { Req *types.DeleteFileInGuest `xml:"urn:vim25 DeleteFileInGuest,omitempty"` - Res *types.DeleteFileInGuestResponse `xml:"urn:vim25 DeleteFileInGuestResponse,omitempty"` + Res *types.DeleteFileInGuestResponse `xml:"DeleteFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3685,7 +3965,7 @@ func DeleteFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.Dele type DeleteHostSpecificationBody struct { Req *types.DeleteHostSpecification `xml:"urn:vim25 DeleteHostSpecification,omitempty"` - Res *types.DeleteHostSpecificationResponse `xml:"urn:vim25 DeleteHostSpecificationResponse,omitempty"` + Res *types.DeleteHostSpecificationResponse `xml:"DeleteHostSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3705,7 +3985,7 @@ func DeleteHostSpecification(ctx context.Context, r soap.RoundTripper, req *type type DeleteHostSubSpecificationBody struct { Req *types.DeleteHostSubSpecification `xml:"urn:vim25 DeleteHostSubSpecification,omitempty"` - Res *types.DeleteHostSubSpecificationResponse `xml:"urn:vim25 DeleteHostSubSpecificationResponse,omitempty"` + Res *types.DeleteHostSubSpecificationResponse `xml:"DeleteHostSubSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3725,7 +4005,7 @@ 
func DeleteHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *t type DeleteNvdimmBlockNamespaces_TaskBody struct { Req *types.DeleteNvdimmBlockNamespaces_Task `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_Task,omitempty"` - Res *types.DeleteNvdimmBlockNamespaces_TaskResponse `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_TaskResponse,omitempty"` + Res *types.DeleteNvdimmBlockNamespaces_TaskResponse `xml:"DeleteNvdimmBlockNamespaces_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3745,7 +4025,7 @@ func DeleteNvdimmBlockNamespaces_Task(ctx context.Context, r soap.RoundTripper, type DeleteNvdimmNamespace_TaskBody struct { Req *types.DeleteNvdimmNamespace_Task `xml:"urn:vim25 DeleteNvdimmNamespace_Task,omitempty"` - Res *types.DeleteNvdimmNamespace_TaskResponse `xml:"urn:vim25 DeleteNvdimmNamespace_TaskResponse,omitempty"` + Res *types.DeleteNvdimmNamespace_TaskResponse `xml:"DeleteNvdimmNamespace_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3765,7 +4045,7 @@ func DeleteNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *t type DeleteRegistryKeyInGuestBody struct { Req *types.DeleteRegistryKeyInGuest `xml:"urn:vim25 DeleteRegistryKeyInGuest,omitempty"` - Res *types.DeleteRegistryKeyInGuestResponse `xml:"urn:vim25 DeleteRegistryKeyInGuestResponse,omitempty"` + Res *types.DeleteRegistryKeyInGuestResponse `xml:"DeleteRegistryKeyInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3785,7 +4065,7 @@ func DeleteRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *typ type DeleteRegistryValueInGuestBody struct { Req *types.DeleteRegistryValueInGuest `xml:"urn:vim25 DeleteRegistryValueInGuest,omitempty"` - Res *types.DeleteRegistryValueInGuestResponse `xml:"urn:vim25 DeleteRegistryValueInGuestResponse,omitempty"` + Res *types.DeleteRegistryValueInGuestResponse `xml:"DeleteRegistryValueInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3805,7 +4085,7 @@ func DeleteRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *t type DeleteScsiLunStateBody struct { Req *types.DeleteScsiLunState `xml:"urn:vim25 DeleteScsiLunState,omitempty"` - Res *types.DeleteScsiLunStateResponse `xml:"urn:vim25 DeleteScsiLunStateResponse,omitempty"` + Res *types.DeleteScsiLunStateResponse `xml:"DeleteScsiLunStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3825,7 +4105,7 @@ func DeleteScsiLunState(ctx context.Context, r soap.RoundTripper, req *types.Del type DeleteSnapshot_TaskBody struct { Req *types.DeleteSnapshot_Task `xml:"urn:vim25 DeleteSnapshot_Task,omitempty"` - Res *types.DeleteSnapshot_TaskResponse `xml:"urn:vim25 DeleteSnapshot_TaskResponse,omitempty"` + Res *types.DeleteSnapshot_TaskResponse `xml:"DeleteSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3843,9 +4123,29 @@ func DeleteSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.De return resBody.Res, nil } +type DeleteVStorageObjectEx_TaskBody struct { + Req *types.DeleteVStorageObjectEx_Task `xml:"urn:vim25 DeleteVStorageObjectEx_Task,omitempty"` + Res *types.DeleteVStorageObjectEx_TaskResponse `xml:"DeleteVStorageObjectEx_TaskResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVStorageObjectEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVStorageObjectEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVStorageObjectEx_Task) (*types.DeleteVStorageObjectEx_TaskResponse, error) { + var reqBody, resBody DeleteVStorageObjectEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DeleteVStorageObject_TaskBody struct { Req *types.DeleteVStorageObject_Task `xml:"urn:vim25 DeleteVStorageObject_Task,omitempty"` - Res *types.DeleteVStorageObject_TaskResponse `xml:"urn:vim25 DeleteVStorageObject_TaskResponse,omitempty"` + Res *types.DeleteVStorageObject_TaskResponse `xml:"DeleteVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3865,7 +4165,7 @@ func DeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *ty type DeleteVffsVolumeStateBody struct { Req *types.DeleteVffsVolumeState `xml:"urn:vim25 DeleteVffsVolumeState,omitempty"` - Res *types.DeleteVffsVolumeStateResponse `xml:"urn:vim25 DeleteVffsVolumeStateResponse,omitempty"` + Res *types.DeleteVffsVolumeStateResponse `xml:"DeleteVffsVolumeStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3885,7 +4185,7 @@ func DeleteVffsVolumeState(ctx context.Context, r soap.RoundTripper, req *types. type DeleteVirtualDisk_TaskBody struct { Req *types.DeleteVirtualDisk_Task `xml:"urn:vim25 DeleteVirtualDisk_Task,omitempty"` - Res *types.DeleteVirtualDisk_TaskResponse `xml:"urn:vim25 DeleteVirtualDisk_TaskResponse,omitempty"` + Res *types.DeleteVirtualDisk_TaskResponse `xml:"DeleteVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3905,7 +4205,7 @@ func DeleteVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type DeleteVmfsVolumeStateBody struct { Req *types.DeleteVmfsVolumeState `xml:"urn:vim25 DeleteVmfsVolumeState,omitempty"` - Res *types.DeleteVmfsVolumeStateResponse `xml:"urn:vim25 DeleteVmfsVolumeStateResponse,omitempty"` + Res *types.DeleteVmfsVolumeStateResponse `xml:"DeleteVmfsVolumeStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3925,7 +4225,7 @@ func DeleteVmfsVolumeState(ctx context.Context, r soap.RoundTripper, req *types. 
type DeleteVsanObjectsBody struct { Req *types.DeleteVsanObjects `xml:"urn:vim25 DeleteVsanObjects,omitempty"` - Res *types.DeleteVsanObjectsResponse `xml:"urn:vim25 DeleteVsanObjectsResponse,omitempty"` + Res *types.DeleteVsanObjectsResponse `xml:"DeleteVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3945,7 +4245,7 @@ func DeleteVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Dele type DeselectVnicBody struct { Req *types.DeselectVnic `xml:"urn:vim25 DeselectVnic,omitempty"` - Res *types.DeselectVnicResponse `xml:"urn:vim25 DeselectVnicResponse,omitempty"` + Res *types.DeselectVnicResponse `xml:"DeselectVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3965,7 +4265,7 @@ func DeselectVnic(ctx context.Context, r soap.RoundTripper, req *types.DeselectV type DeselectVnicForNicTypeBody struct { Req *types.DeselectVnicForNicType `xml:"urn:vim25 DeselectVnicForNicType,omitempty"` - Res *types.DeselectVnicForNicTypeResponse `xml:"urn:vim25 DeselectVnicForNicTypeResponse,omitempty"` + Res *types.DeselectVnicForNicTypeResponse `xml:"DeselectVnicForNicTypeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3985,7 +4285,7 @@ func DeselectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types type DestroyChildrenBody struct { Req *types.DestroyChildren `xml:"urn:vim25 DestroyChildren,omitempty"` - Res *types.DestroyChildrenResponse `xml:"urn:vim25 DestroyChildrenResponse,omitempty"` + Res *types.DestroyChildrenResponse `xml:"DestroyChildrenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4005,7 +4305,7 @@ func DestroyChildren(ctx context.Context, r soap.RoundTripper, req *types.Destro type DestroyCollectorBody struct { Req *types.DestroyCollector `xml:"urn:vim25 DestroyCollector,omitempty"` - Res *types.DestroyCollectorResponse `xml:"urn:vim25 DestroyCollectorResponse,omitempty"` + Res *types.DestroyCollectorResponse `xml:"DestroyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4025,7 +4325,7 @@ func DestroyCollector(ctx context.Context, r soap.RoundTripper, req *types.Destr type DestroyDatastoreBody struct { Req *types.DestroyDatastore `xml:"urn:vim25 DestroyDatastore,omitempty"` - Res *types.DestroyDatastoreResponse `xml:"urn:vim25 DestroyDatastoreResponse,omitempty"` + Res *types.DestroyDatastoreResponse `xml:"DestroyDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4045,7 +4345,7 @@ func DestroyDatastore(ctx context.Context, r soap.RoundTripper, req *types.Destr type DestroyIpPoolBody struct { Req *types.DestroyIpPool `xml:"urn:vim25 DestroyIpPool,omitempty"` - Res *types.DestroyIpPoolResponse `xml:"urn:vim25 DestroyIpPoolResponse,omitempty"` + Res *types.DestroyIpPoolResponse `xml:"DestroyIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4065,7 +4365,7 @@ func DestroyIpPool(ctx context.Context, r soap.RoundTripper, req *types.DestroyI type DestroyNetworkBody struct { Req *types.DestroyNetwork `xml:"urn:vim25 DestroyNetwork,omitempty"` - Res *types.DestroyNetworkResponse `xml:"urn:vim25 DestroyNetworkResponse,omitempty"` + Res *types.DestroyNetworkResponse `xml:"DestroyNetworkResponse,omitempty"` 
Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4085,7 +4385,7 @@ func DestroyNetwork(ctx context.Context, r soap.RoundTripper, req *types.Destroy type DestroyProfileBody struct { Req *types.DestroyProfile `xml:"urn:vim25 DestroyProfile,omitempty"` - Res *types.DestroyProfileResponse `xml:"urn:vim25 DestroyProfileResponse,omitempty"` + Res *types.DestroyProfileResponse `xml:"DestroyProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4105,7 +4405,7 @@ func DestroyProfile(ctx context.Context, r soap.RoundTripper, req *types.Destroy type DestroyPropertyCollectorBody struct { Req *types.DestroyPropertyCollector `xml:"urn:vim25 DestroyPropertyCollector,omitempty"` - Res *types.DestroyPropertyCollectorResponse `xml:"urn:vim25 DestroyPropertyCollectorResponse,omitempty"` + Res *types.DestroyPropertyCollectorResponse `xml:"DestroyPropertyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4125,7 +4425,7 @@ func DestroyPropertyCollector(ctx context.Context, r soap.RoundTripper, req *typ type DestroyPropertyFilterBody struct { Req *types.DestroyPropertyFilter `xml:"urn:vim25 DestroyPropertyFilter,omitempty"` - Res *types.DestroyPropertyFilterResponse `xml:"urn:vim25 DestroyPropertyFilterResponse,omitempty"` + Res *types.DestroyPropertyFilterResponse `xml:"DestroyPropertyFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4145,7 +4445,7 @@ func DestroyPropertyFilter(ctx context.Context, r soap.RoundTripper, req *types. type DestroyVffsBody struct { Req *types.DestroyVffs `xml:"urn:vim25 DestroyVffs,omitempty"` - Res *types.DestroyVffsResponse `xml:"urn:vim25 DestroyVffsResponse,omitempty"` + Res *types.DestroyVffsResponse `xml:"DestroyVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4165,7 +4465,7 @@ func DestroyVffs(ctx context.Context, r soap.RoundTripper, req *types.DestroyVff type DestroyViewBody struct { Req *types.DestroyView `xml:"urn:vim25 DestroyView,omitempty"` - Res *types.DestroyViewResponse `xml:"urn:vim25 DestroyViewResponse,omitempty"` + Res *types.DestroyViewResponse `xml:"DestroyViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4185,7 +4485,7 @@ func DestroyView(ctx context.Context, r soap.RoundTripper, req *types.DestroyVie type Destroy_TaskBody struct { Req *types.Destroy_Task `xml:"urn:vim25 Destroy_Task,omitempty"` - Res *types.Destroy_TaskResponse `xml:"urn:vim25 Destroy_TaskResponse,omitempty"` + Res *types.Destroy_TaskResponse `xml:"Destroy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4205,7 +4505,7 @@ func Destroy_Task(ctx context.Context, r soap.RoundTripper, req *types.Destroy_T type DetachDisk_TaskBody struct { Req *types.DetachDisk_Task `xml:"urn:vim25 DetachDisk_Task,omitempty"` - Res *types.DetachDisk_TaskResponse `xml:"urn:vim25 DetachDisk_TaskResponse,omitempty"` + Res *types.DetachDisk_TaskResponse `xml:"DetachDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4225,7 +4525,7 @@ func DetachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Detach type DetachScsiLunBody struct { Req *types.DetachScsiLun `xml:"urn:vim25 DetachScsiLun,omitempty"` - 
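The *_Task variants (Destroy_Task, DetachDisk_Task, and the newly added Delete/Disconnect tasks above) only start the operation and hand back a Task reference in Returnval; callers normally wrap that reference with the object package and wait on it. A sketch under those assumptions (c is a connected *vim25.Client, ref the entity to destroy; the helper name is illustrative):

package example

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

// destroyAndWait starts Destroy_Task through the generated wrapper and blocks
// until the returned Task managed object completes.
func destroyAndWait(ctx context.Context, c *vim25.Client, ref types.ManagedObjectReference) error {
	res, err := methods.Destroy_Task(ctx, c, &types.Destroy_Task{This: ref})
	if err != nil {
		return err
	}
	return object.NewTask(c, res.Returnval).Wait(ctx)
}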
Res *types.DetachScsiLunResponse `xml:"urn:vim25 DetachScsiLunResponse,omitempty"` + Res *types.DetachScsiLunResponse `xml:"DetachScsiLunResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4245,7 +4545,7 @@ func DetachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.DetachSc type DetachScsiLunEx_TaskBody struct { Req *types.DetachScsiLunEx_Task `xml:"urn:vim25 DetachScsiLunEx_Task,omitempty"` - Res *types.DetachScsiLunEx_TaskResponse `xml:"urn:vim25 DetachScsiLunEx_TaskResponse,omitempty"` + Res *types.DetachScsiLunEx_TaskResponse `xml:"DetachScsiLunEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4265,7 +4565,7 @@ func DetachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.D type DetachTagFromVStorageObjectBody struct { Req *types.DetachTagFromVStorageObject `xml:"urn:vim25 DetachTagFromVStorageObject,omitempty"` - Res *types.DetachTagFromVStorageObjectResponse `xml:"urn:vim25 DetachTagFromVStorageObjectResponse,omitempty"` + Res *types.DetachTagFromVStorageObjectResponse `xml:"DetachTagFromVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4283,9 +4583,49 @@ func DetachTagFromVStorageObject(ctx context.Context, r soap.RoundTripper, req * return resBody.Res, nil } +type DisableAlarmBody struct { + Req *types.DisableAlarm `xml:"urn:vim25 DisableAlarm,omitempty"` + Res *types.DisableAlarmResponse `xml:"DisableAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableAlarm(ctx context.Context, r soap.RoundTripper, req *types.DisableAlarm) (*types.DisableAlarmResponse, error) { + var reqBody, resBody DisableAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableClusteredVmdkSupportBody struct { + Req *types.DisableClusteredVmdkSupport `xml:"urn:vim25 DisableClusteredVmdkSupport,omitempty"` + Res *types.DisableClusteredVmdkSupportResponse `xml:"DisableClusteredVmdkSupportResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableClusteredVmdkSupportBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableClusteredVmdkSupport(ctx context.Context, r soap.RoundTripper, req *types.DisableClusteredVmdkSupport) (*types.DisableClusteredVmdkSupportResponse, error) { + var reqBody, resBody DisableClusteredVmdkSupportBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DisableEvcMode_TaskBody struct { Req *types.DisableEvcMode_Task `xml:"urn:vim25 DisableEvcMode_Task,omitempty"` - Res *types.DisableEvcMode_TaskResponse `xml:"urn:vim25 DisableEvcMode_TaskResponse,omitempty"` + Res *types.DisableEvcMode_TaskResponse `xml:"DisableEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4305,7 +4645,7 @@ func DisableEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.Di type DisableFeatureBody struct { Req *types.DisableFeature `xml:"urn:vim25 DisableFeature,omitempty"` - Res *types.DisableFeatureResponse `xml:"urn:vim25 DisableFeatureResponse,omitempty"` + Res 
*types.DisableFeatureResponse `xml:"DisableFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4325,7 +4665,7 @@ func DisableFeature(ctx context.Context, r soap.RoundTripper, req *types.Disable type DisableHyperThreadingBody struct { Req *types.DisableHyperThreading `xml:"urn:vim25 DisableHyperThreading,omitempty"` - Res *types.DisableHyperThreadingResponse `xml:"urn:vim25 DisableHyperThreadingResponse,omitempty"` + Res *types.DisableHyperThreadingResponse `xml:"DisableHyperThreadingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4345,7 +4685,7 @@ func DisableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types. type DisableMultipathPathBody struct { Req *types.DisableMultipathPath `xml:"urn:vim25 DisableMultipathPath,omitempty"` - Res *types.DisableMultipathPathResponse `xml:"urn:vim25 DisableMultipathPathResponse,omitempty"` + Res *types.DisableMultipathPathResponse `xml:"DisableMultipathPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4365,7 +4705,7 @@ func DisableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.D type DisableRulesetBody struct { Req *types.DisableRuleset `xml:"urn:vim25 DisableRuleset,omitempty"` - Res *types.DisableRulesetResponse `xml:"urn:vim25 DisableRulesetResponse,omitempty"` + Res *types.DisableRulesetResponse `xml:"DisableRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4385,7 +4725,7 @@ func DisableRuleset(ctx context.Context, r soap.RoundTripper, req *types.Disable type DisableSecondaryVM_TaskBody struct { Req *types.DisableSecondaryVM_Task `xml:"urn:vim25 DisableSecondaryVM_Task,omitempty"` - Res *types.DisableSecondaryVM_TaskResponse `xml:"urn:vim25 DisableSecondaryVM_TaskResponse,omitempty"` + Res *types.DisableSecondaryVM_TaskResponse `xml:"DisableSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4405,7 +4745,7 @@ func DisableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *type type DisableSmartCardAuthenticationBody struct { Req *types.DisableSmartCardAuthentication `xml:"urn:vim25 DisableSmartCardAuthentication,omitempty"` - Res *types.DisableSmartCardAuthenticationResponse `xml:"urn:vim25 DisableSmartCardAuthenticationResponse,omitempty"` + Res *types.DisableSmartCardAuthenticationResponse `xml:"DisableSmartCardAuthenticationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4425,7 +4765,7 @@ func DisableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, re type DisconnectHost_TaskBody struct { Req *types.DisconnectHost_Task `xml:"urn:vim25 DisconnectHost_Task,omitempty"` - Res *types.DisconnectHost_TaskResponse `xml:"urn:vim25 DisconnectHost_TaskResponse,omitempty"` + Res *types.DisconnectHost_TaskResponse `xml:"DisconnectHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4443,16 +4783,16 @@ func DisconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Di return resBody.Res, nil } -type DiscoverFcoeHbasBody struct { - Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` - Res *types.DiscoverFcoeHbasResponse `xml:"urn:vim25 DiscoverFcoeHbasResponse,omitempty"` - Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +type DisconnectNvmeControllerBody struct { + Req *types.DisconnectNvmeController `xml:"urn:vim25 DisconnectNvmeController,omitempty"` + Res *types.DisconnectNvmeControllerResponse `xml:"DisconnectNvmeControllerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ } +func (b *DisconnectNvmeControllerBody) Fault() *soap.Fault { return b.Fault_ } -func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { - var reqBody, resBody DiscoverFcoeHbasBody +func DisconnectNvmeController(ctx context.Context, r soap.RoundTripper, req *types.DisconnectNvmeController) (*types.DisconnectNvmeControllerResponse, error) { + var reqBody, resBody DisconnectNvmeControllerBody reqBody.Req = req @@ -4463,16 +4803,16 @@ func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.Disco return resBody.Res, nil } -type DissociateProfileBody struct { - Req *types.DissociateProfile `xml:"urn:vim25 DissociateProfile,omitempty"` - Res *types.DissociateProfileResponse `xml:"urn:vim25 DissociateProfileResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +type DisconnectNvmeControllerEx_TaskBody struct { + Req *types.DisconnectNvmeControllerEx_Task `xml:"urn:vim25 DisconnectNvmeControllerEx_Task,omitempty"` + Res *types.DisconnectNvmeControllerEx_TaskResponse `xml:"DisconnectNvmeControllerEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } +func (b *DisconnectNvmeControllerEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } -func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { - var reqBody, resBody DissociateProfileBody +func DisconnectNvmeControllerEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DisconnectNvmeControllerEx_Task) (*types.DisconnectNvmeControllerEx_TaskResponse, error) { + var reqBody, resBody DisconnectNvmeControllerEx_TaskBody reqBody.Req = req @@ -4483,11 +4823,71 @@ func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.Diss return resBody.Res, nil } -type DoesCustomizationSpecExistBody struct { - Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` - Res *types.DoesCustomizationSpecExistResponse `xml:"urn:vim25 DoesCustomizationSpecExistResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` -} +type DiscoverFcoeHbasBody struct { + Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` + Res *types.DiscoverFcoeHbasResponse `xml:"DiscoverFcoeHbasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ } + +func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { + var reqBody, resBody DiscoverFcoeHbasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DiscoverNvmeControllersBody struct { + Req 
*types.DiscoverNvmeControllers `xml:"urn:vim25 DiscoverNvmeControllers,omitempty"` + Res *types.DiscoverNvmeControllersResponse `xml:"DiscoverNvmeControllersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverNvmeControllersBody) Fault() *soap.Fault { return b.Fault_ } + +func DiscoverNvmeControllers(ctx context.Context, r soap.RoundTripper, req *types.DiscoverNvmeControllers) (*types.DiscoverNvmeControllersResponse, error) { + var reqBody, resBody DiscoverNvmeControllersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DissociateProfileBody struct { + Req *types.DissociateProfile `xml:"urn:vim25 DissociateProfile,omitempty"` + Res *types.DissociateProfileResponse `xml:"DissociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { + var reqBody, resBody DissociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DoesCustomizationSpecExistBody struct { + Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` + Res *types.DoesCustomizationSpecExistResponse `xml:"DoesCustomizationSpecExistResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} func (b *DoesCustomizationSpecExistBody) Fault() *soap.Fault { return b.Fault_ } @@ -4503,9 +4903,49 @@ func DoesCustomizationSpecExist(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type DownloadDescriptionTreeBody struct { + Req *types.DownloadDescriptionTree `xml:"urn:vim25 DownloadDescriptionTree,omitempty"` + Res *types.DownloadDescriptionTreeResponse `xml:"DownloadDescriptionTreeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DownloadDescriptionTreeBody) Fault() *soap.Fault { return b.Fault_ } + +func DownloadDescriptionTree(ctx context.Context, r soap.RoundTripper, req *types.DownloadDescriptionTree) (*types.DownloadDescriptionTreeResponse, error) { + var reqBody, resBody DownloadDescriptionTreeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DropConnectionsBody struct { + Req *types.DropConnections `xml:"urn:vim25 DropConnections,omitempty"` + Res *types.DropConnectionsResponse `xml:"DropConnectionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DropConnectionsBody) Fault() *soap.Fault { return b.Fault_ } + +func DropConnections(ctx context.Context, r soap.RoundTripper, req *types.DropConnections) (*types.DropConnectionsResponse, error) { + var reqBody, resBody DropConnectionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DuplicateCustomizationSpecBody struct { Req *types.DuplicateCustomizationSpec `xml:"urn:vim25 DuplicateCustomizationSpec,omitempty"` - Res *types.DuplicateCustomizationSpecResponse 
`xml:"urn:vim25 DuplicateCustomizationSpecResponse,omitempty"` + Res *types.DuplicateCustomizationSpecResponse `xml:"DuplicateCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4525,7 +4965,7 @@ func DuplicateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *t type DvsReconfigureVmVnicNetworkResourcePool_TaskBody struct { Req *types.DvsReconfigureVmVnicNetworkResourcePool_Task `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_Task,omitempty"` - Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` + Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:"DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4545,7 +4985,7 @@ func DvsReconfigureVmVnicNetworkResourcePool_Task(ctx context.Context, r soap.Ro type EagerZeroVirtualDisk_TaskBody struct { Req *types.EagerZeroVirtualDisk_Task `xml:"urn:vim25 EagerZeroVirtualDisk_Task,omitempty"` - Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"urn:vim25 EagerZeroVirtualDisk_TaskResponse,omitempty"` + Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"EagerZeroVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4563,9 +5003,29 @@ func EagerZeroVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *ty return resBody.Res, nil } +type EnableAlarmBody struct { + Req *types.EnableAlarm `xml:"urn:vim25 EnableAlarm,omitempty"` + Res *types.EnableAlarmResponse `xml:"EnableAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableAlarm(ctx context.Context, r soap.RoundTripper, req *types.EnableAlarm) (*types.EnableAlarmResponse, error) { + var reqBody, resBody EnableAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type EnableAlarmActionsBody struct { Req *types.EnableAlarmActions `xml:"urn:vim25 EnableAlarmActions,omitempty"` - Res *types.EnableAlarmActionsResponse `xml:"urn:vim25 EnableAlarmActionsResponse,omitempty"` + Res *types.EnableAlarmActionsResponse `xml:"EnableAlarmActionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4583,9 +5043,29 @@ func EnableAlarmActions(ctx context.Context, r soap.RoundTripper, req *types.Ena return resBody.Res, nil } +type EnableClusteredVmdkSupportBody struct { + Req *types.EnableClusteredVmdkSupport `xml:"urn:vim25 EnableClusteredVmdkSupport,omitempty"` + Res *types.EnableClusteredVmdkSupportResponse `xml:"EnableClusteredVmdkSupportResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableClusteredVmdkSupportBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableClusteredVmdkSupport(ctx context.Context, r soap.RoundTripper, req *types.EnableClusteredVmdkSupport) (*types.EnableClusteredVmdkSupportResponse, error) { + var reqBody, resBody EnableClusteredVmdkSupportBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type EnableCryptoBody struct { Req 
*types.EnableCrypto `xml:"urn:vim25 EnableCrypto,omitempty"` - Res *types.EnableCryptoResponse `xml:"urn:vim25 EnableCryptoResponse,omitempty"` + Res *types.EnableCryptoResponse `xml:"EnableCryptoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4605,7 +5085,7 @@ func EnableCrypto(ctx context.Context, r soap.RoundTripper, req *types.EnableCry type EnableFeatureBody struct { Req *types.EnableFeature `xml:"urn:vim25 EnableFeature,omitempty"` - Res *types.EnableFeatureResponse `xml:"urn:vim25 EnableFeatureResponse,omitempty"` + Res *types.EnableFeatureResponse `xml:"EnableFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4625,7 +5105,7 @@ func EnableFeature(ctx context.Context, r soap.RoundTripper, req *types.EnableFe type EnableHyperThreadingBody struct { Req *types.EnableHyperThreading `xml:"urn:vim25 EnableHyperThreading,omitempty"` - Res *types.EnableHyperThreadingResponse `xml:"urn:vim25 EnableHyperThreadingResponse,omitempty"` + Res *types.EnableHyperThreadingResponse `xml:"EnableHyperThreadingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4645,7 +5125,7 @@ func EnableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.E type EnableMultipathPathBody struct { Req *types.EnableMultipathPath `xml:"urn:vim25 EnableMultipathPath,omitempty"` - Res *types.EnableMultipathPathResponse `xml:"urn:vim25 EnableMultipathPathResponse,omitempty"` + Res *types.EnableMultipathPathResponse `xml:"EnableMultipathPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4665,7 +5145,7 @@ func EnableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.En type EnableNetworkResourceManagementBody struct { Req *types.EnableNetworkResourceManagement `xml:"urn:vim25 EnableNetworkResourceManagement,omitempty"` - Res *types.EnableNetworkResourceManagementResponse `xml:"urn:vim25 EnableNetworkResourceManagementResponse,omitempty"` + Res *types.EnableNetworkResourceManagementResponse `xml:"EnableNetworkResourceManagementResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4685,7 +5165,7 @@ func EnableNetworkResourceManagement(ctx context.Context, r soap.RoundTripper, r type EnableRulesetBody struct { Req *types.EnableRuleset `xml:"urn:vim25 EnableRuleset,omitempty"` - Res *types.EnableRulesetResponse `xml:"urn:vim25 EnableRulesetResponse,omitempty"` + Res *types.EnableRulesetResponse `xml:"EnableRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4705,7 +5185,7 @@ func EnableRuleset(ctx context.Context, r soap.RoundTripper, req *types.EnableRu type EnableSecondaryVM_TaskBody struct { Req *types.EnableSecondaryVM_Task `xml:"urn:vim25 EnableSecondaryVM_Task,omitempty"` - Res *types.EnableSecondaryVM_TaskResponse `xml:"urn:vim25 EnableSecondaryVM_TaskResponse,omitempty"` + Res *types.EnableSecondaryVM_TaskResponse `xml:"EnableSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4725,7 +5205,7 @@ func EnableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types type EnableSmartCardAuthenticationBody struct { Req *types.EnableSmartCardAuthentication `xml:"urn:vim25 EnableSmartCardAuthentication,omitempty"` - Res 
*types.EnableSmartCardAuthenticationResponse `xml:"urn:vim25 EnableSmartCardAuthenticationResponse,omitempty"` + Res *types.EnableSmartCardAuthenticationResponse `xml:"EnableSmartCardAuthenticationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4745,7 +5225,7 @@ func EnableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req type EnterLockdownModeBody struct { Req *types.EnterLockdownMode `xml:"urn:vim25 EnterLockdownMode,omitempty"` - Res *types.EnterLockdownModeResponse `xml:"urn:vim25 EnterLockdownModeResponse,omitempty"` + Res *types.EnterLockdownModeResponse `xml:"EnterLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4765,7 +5245,7 @@ func EnterLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.Ente type EnterMaintenanceMode_TaskBody struct { Req *types.EnterMaintenanceMode_Task `xml:"urn:vim25 EnterMaintenanceMode_Task,omitempty"` - Res *types.EnterMaintenanceMode_TaskResponse `xml:"urn:vim25 EnterMaintenanceMode_TaskResponse,omitempty"` + Res *types.EnterMaintenanceMode_TaskResponse `xml:"EnterMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4785,7 +5265,7 @@ func EnterMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *ty type EstimateDatabaseSizeBody struct { Req *types.EstimateDatabaseSize `xml:"urn:vim25 EstimateDatabaseSize,omitempty"` - Res *types.EstimateDatabaseSizeResponse `xml:"urn:vim25 EstimateDatabaseSizeResponse,omitempty"` + Res *types.EstimateDatabaseSizeResponse `xml:"EstimateDatabaseSizeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4805,7 +5285,7 @@ func EstimateDatabaseSize(ctx context.Context, r soap.RoundTripper, req *types.E type EstimateStorageForConsolidateSnapshots_TaskBody struct { Req *types.EstimateStorageForConsolidateSnapshots_Task `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_Task,omitempty"` - Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` + Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4825,7 +5305,7 @@ func EstimateStorageForConsolidateSnapshots_Task(ctx context.Context, r soap.Rou type EsxAgentHostManagerUpdateConfigBody struct { Req *types.EsxAgentHostManagerUpdateConfig `xml:"urn:vim25 EsxAgentHostManagerUpdateConfig,omitempty"` - Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"urn:vim25 EsxAgentHostManagerUpdateConfigResponse,omitempty"` + Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"EsxAgentHostManagerUpdateConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4845,7 +5325,7 @@ func EsxAgentHostManagerUpdateConfig(ctx context.Context, r soap.RoundTripper, r type EvacuateVsanNode_TaskBody struct { Req *types.EvacuateVsanNode_Task `xml:"urn:vim25 EvacuateVsanNode_Task,omitempty"` - Res *types.EvacuateVsanNode_TaskResponse `xml:"urn:vim25 EvacuateVsanNode_TaskResponse,omitempty"` + Res *types.EvacuateVsanNode_TaskResponse `xml:"EvacuateVsanNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ 
-4865,7 +5345,7 @@ func EvacuateVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types. type EvcManagerBody struct { Req *types.EvcManager `xml:"urn:vim25 EvcManager,omitempty"` - Res *types.EvcManagerResponse `xml:"urn:vim25 EvcManagerResponse,omitempty"` + Res *types.EvcManagerResponse `xml:"EvcManagerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4885,7 +5365,7 @@ func EvcManager(ctx context.Context, r soap.RoundTripper, req *types.EvcManager) type ExecuteHostProfileBody struct { Req *types.ExecuteHostProfile `xml:"urn:vim25 ExecuteHostProfile,omitempty"` - Res *types.ExecuteHostProfileResponse `xml:"urn:vim25 ExecuteHostProfileResponse,omitempty"` + Res *types.ExecuteHostProfileResponse `xml:"ExecuteHostProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4905,7 +5385,7 @@ func ExecuteHostProfile(ctx context.Context, r soap.RoundTripper, req *types.Exe type ExecuteSimpleCommandBody struct { Req *types.ExecuteSimpleCommand `xml:"urn:vim25 ExecuteSimpleCommand,omitempty"` - Res *types.ExecuteSimpleCommandResponse `xml:"urn:vim25 ExecuteSimpleCommandResponse,omitempty"` + Res *types.ExecuteSimpleCommandResponse `xml:"ExecuteSimpleCommandResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4925,7 +5405,7 @@ func ExecuteSimpleCommand(ctx context.Context, r soap.RoundTripper, req *types.E type ExitLockdownModeBody struct { Req *types.ExitLockdownMode `xml:"urn:vim25 ExitLockdownMode,omitempty"` - Res *types.ExitLockdownModeResponse `xml:"urn:vim25 ExitLockdownModeResponse,omitempty"` + Res *types.ExitLockdownModeResponse `xml:"ExitLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4945,7 +5425,7 @@ func ExitLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ExitL type ExitMaintenanceMode_TaskBody struct { Req *types.ExitMaintenanceMode_Task `xml:"urn:vim25 ExitMaintenanceMode_Task,omitempty"` - Res *types.ExitMaintenanceMode_TaskResponse `xml:"urn:vim25 ExitMaintenanceMode_TaskResponse,omitempty"` + Res *types.ExitMaintenanceMode_TaskResponse `xml:"ExitMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4965,7 +5445,7 @@ func ExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *typ type ExpandVmfsDatastoreBody struct { Req *types.ExpandVmfsDatastore `xml:"urn:vim25 ExpandVmfsDatastore,omitempty"` - Res *types.ExpandVmfsDatastoreResponse `xml:"urn:vim25 ExpandVmfsDatastoreResponse,omitempty"` + Res *types.ExpandVmfsDatastoreResponse `xml:"ExpandVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4985,7 +5465,7 @@ func ExpandVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.Ex type ExpandVmfsExtentBody struct { Req *types.ExpandVmfsExtent `xml:"urn:vim25 ExpandVmfsExtent,omitempty"` - Res *types.ExpandVmfsExtentResponse `xml:"urn:vim25 ExpandVmfsExtentResponse,omitempty"` + Res *types.ExpandVmfsExtentResponse `xml:"ExpandVmfsExtentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5005,7 +5485,7 @@ func ExpandVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.Expan type ExportAnswerFile_TaskBody struct { Req *types.ExportAnswerFile_Task 
`xml:"urn:vim25 ExportAnswerFile_Task,omitempty"` - Res *types.ExportAnswerFile_TaskResponse `xml:"urn:vim25 ExportAnswerFile_TaskResponse,omitempty"` + Res *types.ExportAnswerFile_TaskResponse `xml:"ExportAnswerFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5025,7 +5505,7 @@ func ExportAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types. type ExportProfileBody struct { Req *types.ExportProfile `xml:"urn:vim25 ExportProfile,omitempty"` - Res *types.ExportProfileResponse `xml:"urn:vim25 ExportProfileResponse,omitempty"` + Res *types.ExportProfileResponse `xml:"ExportProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5045,7 +5525,7 @@ func ExportProfile(ctx context.Context, r soap.RoundTripper, req *types.ExportPr type ExportSnapshotBody struct { Req *types.ExportSnapshot `xml:"urn:vim25 ExportSnapshot,omitempty"` - Res *types.ExportSnapshotResponse `xml:"urn:vim25 ExportSnapshotResponse,omitempty"` + Res *types.ExportSnapshotResponse `xml:"ExportSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5065,7 +5545,7 @@ func ExportSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ExportS type ExportVAppBody struct { Req *types.ExportVApp `xml:"urn:vim25 ExportVApp,omitempty"` - Res *types.ExportVAppResponse `xml:"urn:vim25 ExportVAppResponse,omitempty"` + Res *types.ExportVAppResponse `xml:"ExportVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5085,7 +5565,7 @@ func ExportVApp(ctx context.Context, r soap.RoundTripper, req *types.ExportVApp) type ExportVmBody struct { Req *types.ExportVm `xml:"urn:vim25 ExportVm,omitempty"` - Res *types.ExportVmResponse `xml:"urn:vim25 ExportVmResponse,omitempty"` + Res *types.ExportVmResponse `xml:"ExportVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5105,7 +5585,7 @@ func ExportVm(ctx context.Context, r soap.RoundTripper, req *types.ExportVm) (*t type ExtendDisk_TaskBody struct { Req *types.ExtendDisk_Task `xml:"urn:vim25 ExtendDisk_Task,omitempty"` - Res *types.ExtendDisk_TaskResponse `xml:"urn:vim25 ExtendDisk_TaskResponse,omitempty"` + Res *types.ExtendDisk_TaskResponse `xml:"ExtendDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5123,9 +5603,29 @@ func ExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Extend return resBody.Res, nil } +type ExtendHCI_TaskBody struct { + Req *types.ExtendHCI_Task `xml:"urn:vim25 ExtendHCI_Task,omitempty"` + Res *types.ExtendHCI_TaskResponse `xml:"ExtendHCI_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendHCI_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendHCI_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendHCI_Task) (*types.ExtendHCI_TaskResponse, error) { + var reqBody, resBody ExtendHCI_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ExtendVffsBody struct { Req *types.ExtendVffs `xml:"urn:vim25 ExtendVffs,omitempty"` - Res *types.ExtendVffsResponse `xml:"urn:vim25 ExtendVffsResponse,omitempty"` + Res *types.ExtendVffsResponse 
`xml:"ExtendVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5145,7 +5645,7 @@ func ExtendVffs(ctx context.Context, r soap.RoundTripper, req *types.ExtendVffs) type ExtendVirtualDisk_TaskBody struct { Req *types.ExtendVirtualDisk_Task `xml:"urn:vim25 ExtendVirtualDisk_Task,omitempty"` - Res *types.ExtendVirtualDisk_TaskResponse `xml:"urn:vim25 ExtendVirtualDisk_TaskResponse,omitempty"` + Res *types.ExtendVirtualDisk_TaskResponse `xml:"ExtendVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5165,7 +5665,7 @@ func ExtendVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type ExtendVmfsDatastoreBody struct { Req *types.ExtendVmfsDatastore `xml:"urn:vim25 ExtendVmfsDatastore,omitempty"` - Res *types.ExtendVmfsDatastoreResponse `xml:"urn:vim25 ExtendVmfsDatastoreResponse,omitempty"` + Res *types.ExtendVmfsDatastoreResponse `xml:"ExtendVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5185,7 +5685,7 @@ func ExtendVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.Ex type ExtractOvfEnvironmentBody struct { Req *types.ExtractOvfEnvironment `xml:"urn:vim25 ExtractOvfEnvironment,omitempty"` - Res *types.ExtractOvfEnvironmentResponse `xml:"urn:vim25 ExtractOvfEnvironmentResponse,omitempty"` + Res *types.ExtractOvfEnvironmentResponse `xml:"ExtractOvfEnvironmentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5203,9 +5703,29 @@ func ExtractOvfEnvironment(ctx context.Context, r soap.RoundTripper, req *types. return resBody.Res, nil } +type FetchAuditRecordsBody struct { + Req *types.FetchAuditRecords `xml:"urn:vim25 FetchAuditRecords,omitempty"` + Res *types.FetchAuditRecordsResponse `xml:"FetchAuditRecordsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchAuditRecordsBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchAuditRecords(ctx context.Context, r soap.RoundTripper, req *types.FetchAuditRecords) (*types.FetchAuditRecordsResponse, error) { + var reqBody, resBody FetchAuditRecordsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type FetchDVPortKeysBody struct { Req *types.FetchDVPortKeys `xml:"urn:vim25 FetchDVPortKeys,omitempty"` - Res *types.FetchDVPortKeysResponse `xml:"urn:vim25 FetchDVPortKeysResponse,omitempty"` + Res *types.FetchDVPortKeysResponse `xml:"FetchDVPortKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5225,7 +5745,7 @@ func FetchDVPortKeys(ctx context.Context, r soap.RoundTripper, req *types.FetchD type FetchDVPortsBody struct { Req *types.FetchDVPorts `xml:"urn:vim25 FetchDVPorts,omitempty"` - Res *types.FetchDVPortsResponse `xml:"urn:vim25 FetchDVPortsResponse,omitempty"` + Res *types.FetchDVPortsResponse `xml:"FetchDVPortsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5245,7 +5765,7 @@ func FetchDVPorts(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPo type FetchSystemEventLogBody struct { Req *types.FetchSystemEventLog `xml:"urn:vim25 FetchSystemEventLog,omitempty"` - Res *types.FetchSystemEventLogResponse `xml:"urn:vim25 
FetchSystemEventLogResponse,omitempty"` + Res *types.FetchSystemEventLogResponse `xml:"FetchSystemEventLogResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5265,7 +5785,7 @@ func FetchSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.Fe type FetchUserPrivilegeOnEntitiesBody struct { Req *types.FetchUserPrivilegeOnEntities `xml:"urn:vim25 FetchUserPrivilegeOnEntities,omitempty"` - Res *types.FetchUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 FetchUserPrivilegeOnEntitiesResponse,omitempty"` + Res *types.FetchUserPrivilegeOnEntitiesResponse `xml:"FetchUserPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5285,7 +5805,7 @@ func FetchUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req type FindAllByDnsNameBody struct { Req *types.FindAllByDnsName `xml:"urn:vim25 FindAllByDnsName,omitempty"` - Res *types.FindAllByDnsNameResponse `xml:"urn:vim25 FindAllByDnsNameResponse,omitempty"` + Res *types.FindAllByDnsNameResponse `xml:"FindAllByDnsNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5305,7 +5825,7 @@ func FindAllByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindA type FindAllByIpBody struct { Req *types.FindAllByIp `xml:"urn:vim25 FindAllByIp,omitempty"` - Res *types.FindAllByIpResponse `xml:"urn:vim25 FindAllByIpResponse,omitempty"` + Res *types.FindAllByIpResponse `xml:"FindAllByIpResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5325,7 +5845,7 @@ func FindAllByIp(ctx context.Context, r soap.RoundTripper, req *types.FindAllByI type FindAllByUuidBody struct { Req *types.FindAllByUuid `xml:"urn:vim25 FindAllByUuid,omitempty"` - Res *types.FindAllByUuidResponse `xml:"urn:vim25 FindAllByUuidResponse,omitempty"` + Res *types.FindAllByUuidResponse `xml:"FindAllByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5345,7 +5865,7 @@ func FindAllByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindAllB type FindAssociatedProfileBody struct { Req *types.FindAssociatedProfile `xml:"urn:vim25 FindAssociatedProfile,omitempty"` - Res *types.FindAssociatedProfileResponse `xml:"urn:vim25 FindAssociatedProfileResponse,omitempty"` + Res *types.FindAssociatedProfileResponse `xml:"FindAssociatedProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5365,7 +5885,7 @@ func FindAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types. 
type FindByDatastorePathBody struct { Req *types.FindByDatastorePath `xml:"urn:vim25 FindByDatastorePath,omitempty"` - Res *types.FindByDatastorePathResponse `xml:"urn:vim25 FindByDatastorePathResponse,omitempty"` + Res *types.FindByDatastorePathResponse `xml:"FindByDatastorePathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5385,7 +5905,7 @@ func FindByDatastorePath(ctx context.Context, r soap.RoundTripper, req *types.Fi type FindByDnsNameBody struct { Req *types.FindByDnsName `xml:"urn:vim25 FindByDnsName,omitempty"` - Res *types.FindByDnsNameResponse `xml:"urn:vim25 FindByDnsNameResponse,omitempty"` + Res *types.FindByDnsNameResponse `xml:"FindByDnsNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5405,7 +5925,7 @@ func FindByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindByDn type FindByInventoryPathBody struct { Req *types.FindByInventoryPath `xml:"urn:vim25 FindByInventoryPath,omitempty"` - Res *types.FindByInventoryPathResponse `xml:"urn:vim25 FindByInventoryPathResponse,omitempty"` + Res *types.FindByInventoryPathResponse `xml:"FindByInventoryPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5425,7 +5945,7 @@ func FindByInventoryPath(ctx context.Context, r soap.RoundTripper, req *types.Fi type FindByIpBody struct { Req *types.FindByIp `xml:"urn:vim25 FindByIp,omitempty"` - Res *types.FindByIpResponse `xml:"urn:vim25 FindByIpResponse,omitempty"` + Res *types.FindByIpResponse `xml:"FindByIpResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5445,7 +5965,7 @@ func FindByIp(ctx context.Context, r soap.RoundTripper, req *types.FindByIp) (*t type FindByUuidBody struct { Req *types.FindByUuid `xml:"urn:vim25 FindByUuid,omitempty"` - Res *types.FindByUuidResponse `xml:"urn:vim25 FindByUuidResponse,omitempty"` + Res *types.FindByUuidResponse `xml:"FindByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5465,7 +5985,7 @@ func FindByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindByUuid) type FindChildBody struct { Req *types.FindChild `xml:"urn:vim25 FindChild,omitempty"` - Res *types.FindChildResponse `xml:"urn:vim25 FindChildResponse,omitempty"` + Res *types.FindChildResponse `xml:"FindChildResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5485,7 +6005,7 @@ func FindChild(ctx context.Context, r soap.RoundTripper, req *types.FindChild) ( type FindExtensionBody struct { Req *types.FindExtension `xml:"urn:vim25 FindExtension,omitempty"` - Res *types.FindExtensionResponse `xml:"urn:vim25 FindExtensionResponse,omitempty"` + Res *types.FindExtensionResponse `xml:"FindExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5505,7 +6025,7 @@ func FindExtension(ctx context.Context, r soap.RoundTripper, req *types.FindExte type FindRulesForVmBody struct { Req *types.FindRulesForVm `xml:"urn:vim25 FindRulesForVm,omitempty"` - Res *types.FindRulesForVmResponse `xml:"urn:vim25 FindRulesForVmResponse,omitempty"` + Res *types.FindRulesForVmResponse `xml:"FindRulesForVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5525,7 +6045,7 @@ func FindRulesForVm(ctx 
context.Context, r soap.RoundTripper, req *types.FindRul type FormatVffsBody struct { Req *types.FormatVffs `xml:"urn:vim25 FormatVffs,omitempty"` - Res *types.FormatVffsResponse `xml:"urn:vim25 FormatVffsResponse,omitempty"` + Res *types.FormatVffsResponse `xml:"FormatVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5545,7 +6065,7 @@ func FormatVffs(ctx context.Context, r soap.RoundTripper, req *types.FormatVffs) type FormatVmfsBody struct { Req *types.FormatVmfs `xml:"urn:vim25 FormatVmfs,omitempty"` - Res *types.FormatVmfsResponse `xml:"urn:vim25 FormatVmfsResponse,omitempty"` + Res *types.FormatVmfsResponse `xml:"FormatVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5565,7 +6085,7 @@ func FormatVmfs(ctx context.Context, r soap.RoundTripper, req *types.FormatVmfs) type GenerateCertificateSigningRequestBody struct { Req *types.GenerateCertificateSigningRequest `xml:"urn:vim25 GenerateCertificateSigningRequest,omitempty"` - Res *types.GenerateCertificateSigningRequestResponse `xml:"urn:vim25 GenerateCertificateSigningRequestResponse,omitempty"` + Res *types.GenerateCertificateSigningRequestResponse `xml:"GenerateCertificateSigningRequestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5585,7 +6105,7 @@ func GenerateCertificateSigningRequest(ctx context.Context, r soap.RoundTripper, type GenerateCertificateSigningRequestByDnBody struct { Req *types.GenerateCertificateSigningRequestByDn `xml:"urn:vim25 GenerateCertificateSigningRequestByDn,omitempty"` - Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"urn:vim25 GenerateCertificateSigningRequestByDnResponse,omitempty"` + Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"GenerateCertificateSigningRequestByDnResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5605,7 +6125,7 @@ func GenerateCertificateSigningRequestByDn(ctx context.Context, r soap.RoundTrip type GenerateClientCsrBody struct { Req *types.GenerateClientCsr `xml:"urn:vim25 GenerateClientCsr,omitempty"` - Res *types.GenerateClientCsrResponse `xml:"urn:vim25 GenerateClientCsrResponse,omitempty"` + Res *types.GenerateClientCsrResponse `xml:"GenerateClientCsrResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5625,7 +6145,7 @@ func GenerateClientCsr(ctx context.Context, r soap.RoundTripper, req *types.Gene type GenerateConfigTaskListBody struct { Req *types.GenerateConfigTaskList `xml:"urn:vim25 GenerateConfigTaskList,omitempty"` - Res *types.GenerateConfigTaskListResponse `xml:"urn:vim25 GenerateConfigTaskListResponse,omitempty"` + Res *types.GenerateConfigTaskListResponse `xml:"GenerateConfigTaskListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5645,7 +6165,7 @@ func GenerateConfigTaskList(ctx context.Context, r soap.RoundTripper, req *types type GenerateHostConfigTaskSpec_TaskBody struct { Req *types.GenerateHostConfigTaskSpec_Task `xml:"urn:vim25 GenerateHostConfigTaskSpec_Task,omitempty"` - Res *types.GenerateHostConfigTaskSpec_TaskResponse `xml:"urn:vim25 GenerateHostConfigTaskSpec_TaskResponse,omitempty"` + Res *types.GenerateHostConfigTaskSpec_TaskResponse `xml:"GenerateHostConfigTaskSpec_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5665,7 +6185,7 @@ func GenerateHostConfigTaskSpec_Task(ctx context.Context, r soap.RoundTripper, r type GenerateHostProfileTaskList_TaskBody struct { Req *types.GenerateHostProfileTaskList_Task `xml:"urn:vim25 GenerateHostProfileTaskList_Task,omitempty"` - Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"urn:vim25 GenerateHostProfileTaskList_TaskResponse,omitempty"` + Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"GenerateHostProfileTaskList_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5685,7 +6205,7 @@ func GenerateHostProfileTaskList_Task(ctx context.Context, r soap.RoundTripper, type GenerateKeyBody struct { Req *types.GenerateKey `xml:"urn:vim25 GenerateKey,omitempty"` - Res *types.GenerateKeyResponse `xml:"urn:vim25 GenerateKeyResponse,omitempty"` + Res *types.GenerateKeyResponse `xml:"GenerateKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5705,7 +6225,7 @@ func GenerateKey(ctx context.Context, r soap.RoundTripper, req *types.GenerateKe type GenerateLogBundles_TaskBody struct { Req *types.GenerateLogBundles_Task `xml:"urn:vim25 GenerateLogBundles_Task,omitempty"` - Res *types.GenerateLogBundles_TaskResponse `xml:"urn:vim25 GenerateLogBundles_TaskResponse,omitempty"` + Res *types.GenerateLogBundles_TaskResponse `xml:"GenerateLogBundles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5725,7 +6245,7 @@ func GenerateLogBundles_Task(ctx context.Context, r soap.RoundTripper, req *type type GenerateSelfSignedClientCertBody struct { Req *types.GenerateSelfSignedClientCert `xml:"urn:vim25 GenerateSelfSignedClientCert,omitempty"` - Res *types.GenerateSelfSignedClientCertResponse `xml:"urn:vim25 GenerateSelfSignedClientCertResponse,omitempty"` + Res *types.GenerateSelfSignedClientCertResponse `xml:"GenerateSelfSignedClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5745,7 +6265,7 @@ func GenerateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req type GetAlarmBody struct { Req *types.GetAlarm `xml:"urn:vim25 GetAlarm,omitempty"` - Res *types.GetAlarmResponse `xml:"urn:vim25 GetAlarmResponse,omitempty"` + Res *types.GetAlarmResponse `xml:"GetAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5765,7 +6285,7 @@ func GetAlarm(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm) (*t type GetAlarmStateBody struct { Req *types.GetAlarmState `xml:"urn:vim25 GetAlarmState,omitempty"` - Res *types.GetAlarmStateResponse `xml:"urn:vim25 GetAlarmStateResponse,omitempty"` + Res *types.GetAlarmStateResponse `xml:"GetAlarmStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5785,7 +6305,7 @@ func GetAlarmState(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm type GetCustomizationSpecBody struct { Req *types.GetCustomizationSpec `xml:"urn:vim25 GetCustomizationSpec,omitempty"` - Res *types.GetCustomizationSpecResponse `xml:"urn:vim25 GetCustomizationSpecResponse,omitempty"` + Res *types.GetCustomizationSpecResponse `xml:"GetCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5803,9 +6323,29 @@ func 
GetCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.G return resBody.Res, nil } +type GetDefaultKmsClusterBody struct { + Req *types.GetDefaultKmsCluster `xml:"urn:vim25 GetDefaultKmsCluster,omitempty"` + Res *types.GetDefaultKmsClusterResponse `xml:"GetDefaultKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetDefaultKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func GetDefaultKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.GetDefaultKmsCluster) (*types.GetDefaultKmsClusterResponse, error) { + var reqBody, resBody GetDefaultKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type GetPublicKeyBody struct { Req *types.GetPublicKey `xml:"urn:vim25 GetPublicKey,omitempty"` - Res *types.GetPublicKeyResponse `xml:"urn:vim25 GetPublicKeyResponse,omitempty"` + Res *types.GetPublicKeyResponse `xml:"GetPublicKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5825,7 +6365,7 @@ func GetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.GetPublic type GetResourceUsageBody struct { Req *types.GetResourceUsage `xml:"urn:vim25 GetResourceUsage,omitempty"` - Res *types.GetResourceUsageResponse `xml:"urn:vim25 GetResourceUsageResponse,omitempty"` + Res *types.GetResourceUsageResponse `xml:"GetResourceUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5843,9 +6383,49 @@ func GetResourceUsage(ctx context.Context, r soap.RoundTripper, req *types.GetRe return resBody.Res, nil } +type GetSiteInfoBody struct { + Req *types.GetSiteInfo `xml:"urn:vim25 GetSiteInfo,omitempty"` + Res *types.GetSiteInfoResponse `xml:"GetSiteInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetSiteInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func GetSiteInfo(ctx context.Context, r soap.RoundTripper, req *types.GetSiteInfo) (*types.GetSiteInfoResponse, error) { + var reqBody, resBody GetSiteInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetSystemVMsRestrictedDatastoresBody struct { + Req *types.GetSystemVMsRestrictedDatastores `xml:"urn:vim25 GetSystemVMsRestrictedDatastores,omitempty"` + Res *types.GetSystemVMsRestrictedDatastoresResponse `xml:"GetSystemVMsRestrictedDatastoresResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetSystemVMsRestrictedDatastoresBody) Fault() *soap.Fault { return b.Fault_ } + +func GetSystemVMsRestrictedDatastores(ctx context.Context, r soap.RoundTripper, req *types.GetSystemVMsRestrictedDatastores) (*types.GetSystemVMsRestrictedDatastoresResponse, error) { + var reqBody, resBody GetSystemVMsRestrictedDatastoresBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type GetVchaClusterHealthBody struct { Req *types.GetVchaClusterHealth `xml:"urn:vim25 GetVchaClusterHealth,omitempty"` - Res *types.GetVchaClusterHealthResponse `xml:"urn:vim25 GetVchaClusterHealthResponse,omitempty"` + Res *types.GetVchaClusterHealthResponse `xml:"GetVchaClusterHealthResponse,omitempty"` Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5865,7 +6445,7 @@ func GetVchaClusterHealth(ctx context.Context, r soap.RoundTripper, req *types.G type GetVsanObjExtAttrsBody struct { Req *types.GetVsanObjExtAttrs `xml:"urn:vim25 GetVsanObjExtAttrs,omitempty"` - Res *types.GetVsanObjExtAttrsResponse `xml:"urn:vim25 GetVsanObjExtAttrsResponse,omitempty"` + Res *types.GetVsanObjExtAttrsResponse `xml:"GetVsanObjExtAttrsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5885,7 +6465,7 @@ func GetVsanObjExtAttrs(ctx context.Context, r soap.RoundTripper, req *types.Get type HasMonitoredEntityBody struct { Req *types.HasMonitoredEntity `xml:"urn:vim25 HasMonitoredEntity,omitempty"` - Res *types.HasMonitoredEntityResponse `xml:"urn:vim25 HasMonitoredEntityResponse,omitempty"` + Res *types.HasMonitoredEntityResponse `xml:"HasMonitoredEntityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5905,7 +6485,7 @@ func HasMonitoredEntity(ctx context.Context, r soap.RoundTripper, req *types.Has type HasPrivilegeOnEntitiesBody struct { Req *types.HasPrivilegeOnEntities `xml:"urn:vim25 HasPrivilegeOnEntities,omitempty"` - Res *types.HasPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasPrivilegeOnEntitiesResponse,omitempty"` + Res *types.HasPrivilegeOnEntitiesResponse `xml:"HasPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5925,7 +6505,7 @@ func HasPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types type HasPrivilegeOnEntityBody struct { Req *types.HasPrivilegeOnEntity `xml:"urn:vim25 HasPrivilegeOnEntity,omitempty"` - Res *types.HasPrivilegeOnEntityResponse `xml:"urn:vim25 HasPrivilegeOnEntityResponse,omitempty"` + Res *types.HasPrivilegeOnEntityResponse `xml:"HasPrivilegeOnEntityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5945,7 +6525,7 @@ func HasPrivilegeOnEntity(ctx context.Context, r soap.RoundTripper, req *types.H type HasProviderBody struct { Req *types.HasProvider `xml:"urn:vim25 HasProvider,omitempty"` - Res *types.HasProviderResponse `xml:"urn:vim25 HasProviderResponse,omitempty"` + Res *types.HasProviderResponse `xml:"HasProviderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5965,7 +6545,7 @@ func HasProvider(ctx context.Context, r soap.RoundTripper, req *types.HasProvide type HasUserPrivilegeOnEntitiesBody struct { Req *types.HasUserPrivilegeOnEntities `xml:"urn:vim25 HasUserPrivilegeOnEntities,omitempty"` - Res *types.HasUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasUserPrivilegeOnEntitiesResponse,omitempty"` + Res *types.HasUserPrivilegeOnEntitiesResponse `xml:"HasUserPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5985,7 +6565,7 @@ func HasUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *t type HostClearVStorageObjectControlFlagsBody struct { Req *types.HostClearVStorageObjectControlFlags `xml:"urn:vim25 HostClearVStorageObjectControlFlags,omitempty"` - Res *types.HostClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostClearVStorageObjectControlFlagsResponse,omitempty"` + Res *types.HostClearVStorageObjectControlFlagsResponse `xml:"HostClearVStorageObjectControlFlagsResponse,omitempty"` 
Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6005,7 +6585,7 @@ func HostClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTrippe type HostCloneVStorageObject_TaskBody struct { Req *types.HostCloneVStorageObject_Task `xml:"urn:vim25 HostCloneVStorageObject_Task,omitempty"` - Res *types.HostCloneVStorageObject_TaskResponse `xml:"urn:vim25 HostCloneVStorageObject_TaskResponse,omitempty"` + Res *types.HostCloneVStorageObject_TaskResponse `xml:"HostCloneVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6025,7 +6605,7 @@ func HostCloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req type HostConfigVFlashCacheBody struct { Req *types.HostConfigVFlashCache `xml:"urn:vim25 HostConfigVFlashCache,omitempty"` - Res *types.HostConfigVFlashCacheResponse `xml:"urn:vim25 HostConfigVFlashCacheResponse,omitempty"` + Res *types.HostConfigVFlashCacheResponse `xml:"HostConfigVFlashCacheResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6045,7 +6625,7 @@ func HostConfigVFlashCache(ctx context.Context, r soap.RoundTripper, req *types. type HostConfigureVFlashResourceBody struct { Req *types.HostConfigureVFlashResource `xml:"urn:vim25 HostConfigureVFlashResource,omitempty"` - Res *types.HostConfigureVFlashResourceResponse `xml:"urn:vim25 HostConfigureVFlashResourceResponse,omitempty"` + Res *types.HostConfigureVFlashResourceResponse `xml:"HostConfigureVFlashResourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6065,7 +6645,7 @@ func HostConfigureVFlashResource(ctx context.Context, r soap.RoundTripper, req * type HostCreateDisk_TaskBody struct { Req *types.HostCreateDisk_Task `xml:"urn:vim25 HostCreateDisk_Task,omitempty"` - Res *types.HostCreateDisk_TaskResponse `xml:"urn:vim25 HostCreateDisk_TaskResponse,omitempty"` + Res *types.HostCreateDisk_TaskResponse `xml:"HostCreateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6083,9 +6663,29 @@ func HostCreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Ho return resBody.Res, nil } +type HostDeleteVStorageObjectEx_TaskBody struct { + Req *types.HostDeleteVStorageObjectEx_Task `xml:"urn:vim25 HostDeleteVStorageObjectEx_Task,omitempty"` + Res *types.HostDeleteVStorageObjectEx_TaskResponse `xml:"HostDeleteVStorageObjectEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostDeleteVStorageObjectEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostDeleteVStorageObjectEx_Task(ctx context.Context, r soap.RoundTripper, req *types.HostDeleteVStorageObjectEx_Task) (*types.HostDeleteVStorageObjectEx_TaskResponse, error) { + var reqBody, resBody HostDeleteVStorageObjectEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostDeleteVStorageObject_TaskBody struct { Req *types.HostDeleteVStorageObject_Task `xml:"urn:vim25 HostDeleteVStorageObject_Task,omitempty"` - Res *types.HostDeleteVStorageObject_TaskResponse `xml:"urn:vim25 HostDeleteVStorageObject_TaskResponse,omitempty"` + Res *types.HostDeleteVStorageObject_TaskResponse `xml:"HostDeleteVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6105,7 +6705,7 @@ func HostDeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req type HostExtendDisk_TaskBody struct { Req *types.HostExtendDisk_Task `xml:"urn:vim25 HostExtendDisk_Task,omitempty"` - Res *types.HostExtendDisk_TaskResponse `xml:"urn:vim25 HostExtendDisk_TaskResponse,omitempty"` + Res *types.HostExtendDisk_TaskResponse `xml:"HostExtendDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6125,7 +6725,7 @@ func HostExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Ho type HostGetVFlashModuleDefaultConfigBody struct { Req *types.HostGetVFlashModuleDefaultConfig `xml:"urn:vim25 HostGetVFlashModuleDefaultConfig,omitempty"` - Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"urn:vim25 HostGetVFlashModuleDefaultConfigResponse,omitempty"` + Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"HostGetVFlashModuleDefaultConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6145,7 +6745,7 @@ func HostGetVFlashModuleDefaultConfig(ctx context.Context, r soap.RoundTripper, type HostImageConfigGetAcceptanceBody struct { Req *types.HostImageConfigGetAcceptance `xml:"urn:vim25 HostImageConfigGetAcceptance,omitempty"` - Res *types.HostImageConfigGetAcceptanceResponse `xml:"urn:vim25 HostImageConfigGetAcceptanceResponse,omitempty"` + Res *types.HostImageConfigGetAcceptanceResponse `xml:"HostImageConfigGetAcceptanceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6165,7 +6765,7 @@ func HostImageConfigGetAcceptance(ctx context.Context, r soap.RoundTripper, req type HostImageConfigGetProfileBody struct { Req *types.HostImageConfigGetProfile `xml:"urn:vim25 HostImageConfigGetProfile,omitempty"` - Res *types.HostImageConfigGetProfileResponse `xml:"urn:vim25 HostImageConfigGetProfileResponse,omitempty"` + Res *types.HostImageConfigGetProfileResponse `xml:"HostImageConfigGetProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6185,7 +6785,7 @@ func HostImageConfigGetProfile(ctx context.Context, r soap.RoundTripper, req *ty type HostInflateDisk_TaskBody struct { Req *types.HostInflateDisk_Task `xml:"urn:vim25 HostInflateDisk_Task,omitempty"` - Res *types.HostInflateDisk_TaskResponse `xml:"urn:vim25 HostInflateDisk_TaskResponse,omitempty"` + Res *types.HostInflateDisk_TaskResponse `xml:"HostInflateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6205,7 +6805,7 @@ func HostInflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.H type HostListVStorageObjectBody struct { Req *types.HostListVStorageObject `xml:"urn:vim25 HostListVStorageObject,omitempty"` - Res *types.HostListVStorageObjectResponse `xml:"urn:vim25 HostListVStorageObjectResponse,omitempty"` + Res *types.HostListVStorageObjectResponse `xml:"HostListVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6225,7 +6825,7 @@ func HostListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types type HostProfileResetValidationStateBody struct { Req *types.HostProfileResetValidationState `xml:"urn:vim25 HostProfileResetValidationState,omitempty"` - Res *types.HostProfileResetValidationStateResponse 
`xml:"urn:vim25 HostProfileResetValidationStateResponse,omitempty"` + Res *types.HostProfileResetValidationStateResponse `xml:"HostProfileResetValidationStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6245,7 +6845,7 @@ func HostProfileResetValidationState(ctx context.Context, r soap.RoundTripper, r type HostReconcileDatastoreInventory_TaskBody struct { Req *types.HostReconcileDatastoreInventory_Task `xml:"urn:vim25 HostReconcileDatastoreInventory_Task,omitempty"` - Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 HostReconcileDatastoreInventory_TaskResponse,omitempty"` + Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"HostReconcileDatastoreInventory_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6265,7 +6865,7 @@ func HostReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripp type HostRegisterDiskBody struct { Req *types.HostRegisterDisk `xml:"urn:vim25 HostRegisterDisk,omitempty"` - Res *types.HostRegisterDiskResponse `xml:"urn:vim25 HostRegisterDiskResponse,omitempty"` + Res *types.HostRegisterDiskResponse `xml:"HostRegisterDiskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6285,7 +6885,7 @@ func HostRegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.HostR type HostRelocateVStorageObject_TaskBody struct { Req *types.HostRelocateVStorageObject_Task `xml:"urn:vim25 HostRelocateVStorageObject_Task,omitempty"` - Res *types.HostRelocateVStorageObject_TaskResponse `xml:"urn:vim25 HostRelocateVStorageObject_TaskResponse,omitempty"` + Res *types.HostRelocateVStorageObject_TaskResponse `xml:"HostRelocateVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6305,7 +6905,7 @@ func HostRelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, r type HostRemoveVFlashResourceBody struct { Req *types.HostRemoveVFlashResource `xml:"urn:vim25 HostRemoveVFlashResource,omitempty"` - Res *types.HostRemoveVFlashResourceResponse `xml:"urn:vim25 HostRemoveVFlashResourceResponse,omitempty"` + Res *types.HostRemoveVFlashResourceResponse `xml:"HostRemoveVFlashResourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6325,7 +6925,7 @@ func HostRemoveVFlashResource(ctx context.Context, r soap.RoundTripper, req *typ type HostRenameVStorageObjectBody struct { Req *types.HostRenameVStorageObject `xml:"urn:vim25 HostRenameVStorageObject,omitempty"` - Res *types.HostRenameVStorageObjectResponse `xml:"urn:vim25 HostRenameVStorageObjectResponse,omitempty"` + Res *types.HostRenameVStorageObjectResponse `xml:"HostRenameVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6345,7 +6945,7 @@ func HostRenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *typ type HostRetrieveVStorageInfrastructureObjectPolicyBody struct { Req *types.HostRetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicy,omitempty"` - Res *types.HostRetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` + Res *types.HostRetrieveVStorageInfrastructureObjectPolicyResponse 
`xml:"HostRetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6365,7 +6965,7 @@ func HostRetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap. type HostRetrieveVStorageObjectBody struct { Req *types.HostRetrieveVStorageObject `xml:"urn:vim25 HostRetrieveVStorageObject,omitempty"` - Res *types.HostRetrieveVStorageObjectResponse `xml:"urn:vim25 HostRetrieveVStorageObjectResponse,omitempty"` + Res *types.HostRetrieveVStorageObjectResponse `xml:"HostRetrieveVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6383,9 +6983,49 @@ func HostRetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type HostRetrieveVStorageObjectMetadataBody struct { + Req *types.HostRetrieveVStorageObjectMetadata `xml:"urn:vim25 HostRetrieveVStorageObjectMetadata,omitempty"` + Res *types.HostRetrieveVStorageObjectMetadataResponse `xml:"HostRetrieveVStorageObjectMetadataResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectMetadataBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObjectMetadata(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectMetadata) (*types.HostRetrieveVStorageObjectMetadataResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectMetadataBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRetrieveVStorageObjectMetadataValueBody struct { + Req *types.HostRetrieveVStorageObjectMetadataValue `xml:"urn:vim25 HostRetrieveVStorageObjectMetadataValue,omitempty"` + Res *types.HostRetrieveVStorageObjectMetadataValueResponse `xml:"HostRetrieveVStorageObjectMetadataValueResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectMetadataValueBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObjectMetadataValue(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectMetadataValue) (*types.HostRetrieveVStorageObjectMetadataValueResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectMetadataValueBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostRetrieveVStorageObjectStateBody struct { Req *types.HostRetrieveVStorageObjectState `xml:"urn:vim25 HostRetrieveVStorageObjectState,omitempty"` - Res *types.HostRetrieveVStorageObjectStateResponse `xml:"urn:vim25 HostRetrieveVStorageObjectStateResponse,omitempty"` + Res *types.HostRetrieveVStorageObjectStateResponse `xml:"HostRetrieveVStorageObjectStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6405,7 +7045,7 @@ func HostRetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, r type HostScheduleReconcileDatastoreInventoryBody struct { Req *types.HostScheduleReconcileDatastoreInventory `xml:"urn:vim25 HostScheduleReconcileDatastoreInventory,omitempty"` - Res *types.HostScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 HostScheduleReconcileDatastoreInventoryResponse,omitempty"` + Res *types.HostScheduleReconcileDatastoreInventoryResponse 
`xml:"HostScheduleReconcileDatastoreInventoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6425,7 +7065,7 @@ func HostScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTr type HostSetVStorageObjectControlFlagsBody struct { Req *types.HostSetVStorageObjectControlFlags `xml:"urn:vim25 HostSetVStorageObjectControlFlags,omitempty"` - Res *types.HostSetVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostSetVStorageObjectControlFlagsResponse,omitempty"` + Res *types.HostSetVStorageObjectControlFlagsResponse `xml:"HostSetVStorageObjectControlFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6445,7 +7085,7 @@ func HostSetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, type HostSpecGetUpdatedHostsBody struct { Req *types.HostSpecGetUpdatedHosts `xml:"urn:vim25 HostSpecGetUpdatedHosts,omitempty"` - Res *types.HostSpecGetUpdatedHostsResponse `xml:"urn:vim25 HostSpecGetUpdatedHostsResponse,omitempty"` + Res *types.HostSpecGetUpdatedHostsResponse `xml:"HostSpecGetUpdatedHostsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6463,9 +7103,49 @@ func HostSpecGetUpdatedHosts(ctx context.Context, r soap.RoundTripper, req *type return resBody.Res, nil } +type HostUpdateVStorageObjectMetadataEx_TaskBody struct { + Req *types.HostUpdateVStorageObjectMetadataEx_Task `xml:"urn:vim25 HostUpdateVStorageObjectMetadataEx_Task,omitempty"` + Res *types.HostUpdateVStorageObjectMetadataEx_TaskResponse `xml:"HostUpdateVStorageObjectMetadataEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostUpdateVStorageObjectMetadataEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostUpdateVStorageObjectMetadataEx_Task(ctx context.Context, r soap.RoundTripper, req *types.HostUpdateVStorageObjectMetadataEx_Task) (*types.HostUpdateVStorageObjectMetadataEx_TaskResponse, error) { + var reqBody, resBody HostUpdateVStorageObjectMetadataEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostUpdateVStorageObjectMetadata_TaskBody struct { + Req *types.HostUpdateVStorageObjectMetadata_Task `xml:"urn:vim25 HostUpdateVStorageObjectMetadata_Task,omitempty"` + Res *types.HostUpdateVStorageObjectMetadata_TaskResponse `xml:"HostUpdateVStorageObjectMetadata_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostUpdateVStorageObjectMetadata_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostUpdateVStorageObjectMetadata_Task(ctx context.Context, r soap.RoundTripper, req *types.HostUpdateVStorageObjectMetadata_Task) (*types.HostUpdateVStorageObjectMetadata_TaskResponse, error) { + var reqBody, resBody HostUpdateVStorageObjectMetadata_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostVStorageObjectCreateDiskFromSnapshot_TaskBody struct { Req *types.HostVStorageObjectCreateDiskFromSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_TaskResponse,omitempty"` + Res 
*types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse `xml:"HostVStorageObjectCreateDiskFromSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6485,7 +7165,7 @@ func HostVStorageObjectCreateDiskFromSnapshot_Task(ctx context.Context, r soap.R type HostVStorageObjectCreateSnapshot_TaskBody struct { Req *types.HostVStorageObjectCreateSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Res *types.HostVStorageObjectCreateSnapshot_TaskResponse `xml:"HostVStorageObjectCreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6505,7 +7185,7 @@ func HostVStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTrip type HostVStorageObjectDeleteSnapshot_TaskBody struct { Req *types.HostVStorageObjectDeleteSnapshot_Task `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectDeleteSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_TaskResponse,omitempty"` + Res *types.HostVStorageObjectDeleteSnapshot_TaskResponse `xml:"HostVStorageObjectDeleteSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6525,7 +7205,7 @@ func HostVStorageObjectDeleteSnapshot_Task(ctx context.Context, r soap.RoundTrip type HostVStorageObjectRetrieveSnapshotInfoBody struct { Req *types.HostVStorageObjectRetrieveSnapshotInfo `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfo,omitempty"` - Res *types.HostVStorageObjectRetrieveSnapshotInfoResponse `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfoResponse,omitempty"` + Res *types.HostVStorageObjectRetrieveSnapshotInfoResponse `xml:"HostVStorageObjectRetrieveSnapshotInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6545,7 +7225,7 @@ func HostVStorageObjectRetrieveSnapshotInfo(ctx context.Context, r soap.RoundTri type HostVStorageObjectRevert_TaskBody struct { Req *types.HostVStorageObjectRevert_Task `xml:"urn:vim25 HostVStorageObjectRevert_Task,omitempty"` - Res *types.HostVStorageObjectRevert_TaskResponse `xml:"urn:vim25 HostVStorageObjectRevert_TaskResponse,omitempty"` + Res *types.HostVStorageObjectRevert_TaskResponse `xml:"HostVStorageObjectRevert_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6565,7 +7245,7 @@ func HostVStorageObjectRevert_Task(ctx context.Context, r soap.RoundTripper, req type HttpNfcLeaseAbortBody struct { Req *types.HttpNfcLeaseAbort `xml:"urn:vim25 HttpNfcLeaseAbort,omitempty"` - Res *types.HttpNfcLeaseAbortResponse `xml:"urn:vim25 HttpNfcLeaseAbortResponse,omitempty"` + Res *types.HttpNfcLeaseAbortResponse `xml:"HttpNfcLeaseAbortResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6585,7 +7265,7 @@ func HttpNfcLeaseAbort(ctx context.Context, r soap.RoundTripper, req *types.Http type HttpNfcLeaseCompleteBody struct { Req *types.HttpNfcLeaseComplete `xml:"urn:vim25 HttpNfcLeaseComplete,omitempty"` - Res *types.HttpNfcLeaseCompleteResponse `xml:"urn:vim25 HttpNfcLeaseCompleteResponse,omitempty"` + Res *types.HttpNfcLeaseCompleteResponse `xml:"HttpNfcLeaseCompleteResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6605,7 +7285,7 @@ func HttpNfcLeaseComplete(ctx context.Context, r soap.RoundTripper, req *types.H type HttpNfcLeaseGetManifestBody struct { Req *types.HttpNfcLeaseGetManifest `xml:"urn:vim25 HttpNfcLeaseGetManifest,omitempty"` - Res *types.HttpNfcLeaseGetManifestResponse `xml:"urn:vim25 HttpNfcLeaseGetManifestResponse,omitempty"` + Res *types.HttpNfcLeaseGetManifestResponse `xml:"HttpNfcLeaseGetManifestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6623,9 +7303,29 @@ func HttpNfcLeaseGetManifest(ctx context.Context, r soap.RoundTripper, req *type return resBody.Res, nil } +type HttpNfcLeaseProbeUrlsBody struct { + Req *types.HttpNfcLeaseProbeUrls `xml:"urn:vim25 HttpNfcLeaseProbeUrls,omitempty"` + Res *types.HttpNfcLeaseProbeUrlsResponse `xml:"HttpNfcLeaseProbeUrlsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseProbeUrlsBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseProbeUrls(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseProbeUrls) (*types.HttpNfcLeaseProbeUrlsResponse, error) { + var reqBody, resBody HttpNfcLeaseProbeUrlsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HttpNfcLeaseProgressBody struct { Req *types.HttpNfcLeaseProgress `xml:"urn:vim25 HttpNfcLeaseProgress,omitempty"` - Res *types.HttpNfcLeaseProgressResponse `xml:"urn:vim25 HttpNfcLeaseProgressResponse,omitempty"` + Res *types.HttpNfcLeaseProgressResponse `xml:"HttpNfcLeaseProgressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6645,7 +7345,7 @@ func HttpNfcLeaseProgress(ctx context.Context, r soap.RoundTripper, req *types.H type HttpNfcLeasePullFromUrls_TaskBody struct { Req *types.HttpNfcLeasePullFromUrls_Task `xml:"urn:vim25 HttpNfcLeasePullFromUrls_Task,omitempty"` - Res *types.HttpNfcLeasePullFromUrls_TaskResponse `xml:"urn:vim25 HttpNfcLeasePullFromUrls_TaskResponse,omitempty"` + Res *types.HttpNfcLeasePullFromUrls_TaskResponse `xml:"HttpNfcLeasePullFromUrls_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6665,7 +7365,7 @@ func HttpNfcLeasePullFromUrls_Task(ctx context.Context, r soap.RoundTripper, req type HttpNfcLeaseSetManifestChecksumTypeBody struct { Req *types.HttpNfcLeaseSetManifestChecksumType `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumType,omitempty"` - Res *types.HttpNfcLeaseSetManifestChecksumTypeResponse `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumTypeResponse,omitempty"` + Res *types.HttpNfcLeaseSetManifestChecksumTypeResponse `xml:"HttpNfcLeaseSetManifestChecksumTypeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6685,7 +7385,7 @@ func HttpNfcLeaseSetManifestChecksumType(ctx context.Context, r soap.RoundTrippe type ImpersonateUserBody struct { Req *types.ImpersonateUser `xml:"urn:vim25 ImpersonateUser,omitempty"` - Res *types.ImpersonateUserResponse `xml:"urn:vim25 ImpersonateUserResponse,omitempty"` + Res *types.ImpersonateUserResponse `xml:"ImpersonateUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6705,7 +7405,7 @@ func ImpersonateUser(ctx context.Context, r soap.RoundTripper, 
req *types.Impers type ImportCertificateForCAM_TaskBody struct { Req *types.ImportCertificateForCAM_Task `xml:"urn:vim25 ImportCertificateForCAM_Task,omitempty"` - Res *types.ImportCertificateForCAM_TaskResponse `xml:"urn:vim25 ImportCertificateForCAM_TaskResponse,omitempty"` + Res *types.ImportCertificateForCAM_TaskResponse `xml:"ImportCertificateForCAM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6725,7 +7425,7 @@ func ImportCertificateForCAM_Task(ctx context.Context, r soap.RoundTripper, req type ImportUnmanagedSnapshotBody struct { Req *types.ImportUnmanagedSnapshot `xml:"urn:vim25 ImportUnmanagedSnapshot,omitempty"` - Res *types.ImportUnmanagedSnapshotResponse `xml:"urn:vim25 ImportUnmanagedSnapshotResponse,omitempty"` + Res *types.ImportUnmanagedSnapshotResponse `xml:"ImportUnmanagedSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6745,7 +7445,7 @@ func ImportUnmanagedSnapshot(ctx context.Context, r soap.RoundTripper, req *type type ImportVAppBody struct { Req *types.ImportVApp `xml:"urn:vim25 ImportVApp,omitempty"` - Res *types.ImportVAppResponse `xml:"urn:vim25 ImportVAppResponse,omitempty"` + Res *types.ImportVAppResponse `xml:"ImportVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6765,7 +7465,7 @@ func ImportVApp(ctx context.Context, r soap.RoundTripper, req *types.ImportVApp) type InflateDisk_TaskBody struct { Req *types.InflateDisk_Task `xml:"urn:vim25 InflateDisk_Task,omitempty"` - Res *types.InflateDisk_TaskResponse `xml:"urn:vim25 InflateDisk_TaskResponse,omitempty"` + Res *types.InflateDisk_TaskResponse `xml:"InflateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6785,7 +7485,7 @@ func InflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Infla type InflateVirtualDisk_TaskBody struct { Req *types.InflateVirtualDisk_Task `xml:"urn:vim25 InflateVirtualDisk_Task,omitempty"` - Res *types.InflateVirtualDisk_TaskResponse `xml:"urn:vim25 InflateVirtualDisk_TaskResponse,omitempty"` + Res *types.InflateVirtualDisk_TaskResponse `xml:"InflateVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6805,7 +7505,7 @@ func InflateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *type type InitializeDisks_TaskBody struct { Req *types.InitializeDisks_Task `xml:"urn:vim25 InitializeDisks_Task,omitempty"` - Res *types.InitializeDisks_TaskResponse `xml:"urn:vim25 InitializeDisks_TaskResponse,omitempty"` + Res *types.InitializeDisks_TaskResponse `xml:"InitializeDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6825,7 +7525,7 @@ func InitializeDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.I type InitiateFileTransferFromGuestBody struct { Req *types.InitiateFileTransferFromGuest `xml:"urn:vim25 InitiateFileTransferFromGuest,omitempty"` - Res *types.InitiateFileTransferFromGuestResponse `xml:"urn:vim25 InitiateFileTransferFromGuestResponse,omitempty"` + Res *types.InitiateFileTransferFromGuestResponse `xml:"InitiateFileTransferFromGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6845,7 +7545,7 @@ func InitiateFileTransferFromGuest(ctx context.Context, r 
soap.RoundTripper, req type InitiateFileTransferToGuestBody struct { Req *types.InitiateFileTransferToGuest `xml:"urn:vim25 InitiateFileTransferToGuest,omitempty"` - Res *types.InitiateFileTransferToGuestResponse `xml:"urn:vim25 InitiateFileTransferToGuestResponse,omitempty"` + Res *types.InitiateFileTransferToGuestResponse `xml:"InitiateFileTransferToGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6865,7 +7565,7 @@ func InitiateFileTransferToGuest(ctx context.Context, r soap.RoundTripper, req * type InstallHostPatchV2_TaskBody struct { Req *types.InstallHostPatchV2_Task `xml:"urn:vim25 InstallHostPatchV2_Task,omitempty"` - Res *types.InstallHostPatchV2_TaskResponse `xml:"urn:vim25 InstallHostPatchV2_TaskResponse,omitempty"` + Res *types.InstallHostPatchV2_TaskResponse `xml:"InstallHostPatchV2_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6885,7 +7585,7 @@ func InstallHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *type type InstallHostPatch_TaskBody struct { Req *types.InstallHostPatch_Task `xml:"urn:vim25 InstallHostPatch_Task,omitempty"` - Res *types.InstallHostPatch_TaskResponse `xml:"urn:vim25 InstallHostPatch_TaskResponse,omitempty"` + Res *types.InstallHostPatch_TaskResponse `xml:"InstallHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6905,7 +7605,7 @@ func InstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types. type InstallIoFilter_TaskBody struct { Req *types.InstallIoFilter_Task `xml:"urn:vim25 InstallIoFilter_Task,omitempty"` - Res *types.InstallIoFilter_TaskResponse `xml:"urn:vim25 InstallIoFilter_TaskResponse,omitempty"` + Res *types.InstallIoFilter_TaskResponse `xml:"InstallIoFilter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6925,7 +7625,7 @@ func InstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.I type InstallServerCertificateBody struct { Req *types.InstallServerCertificate `xml:"urn:vim25 InstallServerCertificate,omitempty"` - Res *types.InstallServerCertificateResponse `xml:"urn:vim25 InstallServerCertificateResponse,omitempty"` + Res *types.InstallServerCertificateResponse `xml:"InstallServerCertificateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6945,7 +7645,7 @@ func InstallServerCertificate(ctx context.Context, r soap.RoundTripper, req *typ type InstallSmartCardTrustAnchorBody struct { Req *types.InstallSmartCardTrustAnchor `xml:"urn:vim25 InstallSmartCardTrustAnchor,omitempty"` - Res *types.InstallSmartCardTrustAnchorResponse `xml:"urn:vim25 InstallSmartCardTrustAnchorResponse,omitempty"` + Res *types.InstallSmartCardTrustAnchorResponse `xml:"InstallSmartCardTrustAnchorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6965,7 +7665,7 @@ func InstallSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req * type InstantClone_TaskBody struct { Req *types.InstantClone_Task `xml:"urn:vim25 InstantClone_Task,omitempty"` - Res *types.InstantClone_TaskResponse `xml:"urn:vim25 InstantClone_TaskResponse,omitempty"` + Res *types.InstantClone_TaskResponse `xml:"InstantClone_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6983,9 
+7683,29 @@ func InstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types.Inst return resBody.Res, nil } +type IsKmsClusterActiveBody struct { + Req *types.IsKmsClusterActive `xml:"urn:vim25 IsKmsClusterActive,omitempty"` + Res *types.IsKmsClusterActiveResponse `xml:"IsKmsClusterActiveResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *IsKmsClusterActiveBody) Fault() *soap.Fault { return b.Fault_ } + +func IsKmsClusterActive(ctx context.Context, r soap.RoundTripper, req *types.IsKmsClusterActive) (*types.IsKmsClusterActiveResponse, error) { + var reqBody, resBody IsKmsClusterActiveBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type IsSharedGraphicsActiveBody struct { Req *types.IsSharedGraphicsActive `xml:"urn:vim25 IsSharedGraphicsActive,omitempty"` - Res *types.IsSharedGraphicsActiveResponse `xml:"urn:vim25 IsSharedGraphicsActiveResponse,omitempty"` + Res *types.IsSharedGraphicsActiveResponse `xml:"IsSharedGraphicsActiveResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7005,7 +7725,7 @@ func IsSharedGraphicsActive(ctx context.Context, r soap.RoundTripper, req *types type JoinDomainWithCAM_TaskBody struct { Req *types.JoinDomainWithCAM_Task `xml:"urn:vim25 JoinDomainWithCAM_Task,omitempty"` - Res *types.JoinDomainWithCAM_TaskResponse `xml:"urn:vim25 JoinDomainWithCAM_TaskResponse,omitempty"` + Res *types.JoinDomainWithCAM_TaskResponse `xml:"JoinDomainWithCAM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7025,7 +7745,7 @@ func JoinDomainWithCAM_Task(ctx context.Context, r soap.RoundTripper, req *types type JoinDomain_TaskBody struct { Req *types.JoinDomain_Task `xml:"urn:vim25 JoinDomain_Task,omitempty"` - Res *types.JoinDomain_TaskResponse `xml:"urn:vim25 JoinDomain_TaskResponse,omitempty"` + Res *types.JoinDomain_TaskResponse `xml:"JoinDomain_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7045,7 +7765,7 @@ func JoinDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDo type LeaveCurrentDomain_TaskBody struct { Req *types.LeaveCurrentDomain_Task `xml:"urn:vim25 LeaveCurrentDomain_Task,omitempty"` - Res *types.LeaveCurrentDomain_TaskResponse `xml:"urn:vim25 LeaveCurrentDomain_TaskResponse,omitempty"` + Res *types.LeaveCurrentDomain_TaskResponse `xml:"LeaveCurrentDomain_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7065,7 +7785,7 @@ func LeaveCurrentDomain_Task(ctx context.Context, r soap.RoundTripper, req *type type ListCACertificateRevocationListsBody struct { Req *types.ListCACertificateRevocationLists `xml:"urn:vim25 ListCACertificateRevocationLists,omitempty"` - Res *types.ListCACertificateRevocationListsResponse `xml:"urn:vim25 ListCACertificateRevocationListsResponse,omitempty"` + Res *types.ListCACertificateRevocationListsResponse `xml:"ListCACertificateRevocationListsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7085,7 +7805,7 @@ func ListCACertificateRevocationLists(ctx context.Context, r soap.RoundTripper, type ListCACertificatesBody struct { Req *types.ListCACertificates `xml:"urn:vim25 ListCACertificates,omitempty"` - Res 
*types.ListCACertificatesResponse `xml:"urn:vim25 ListCACertificatesResponse,omitempty"` + Res *types.ListCACertificatesResponse `xml:"ListCACertificatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7105,7 +7825,7 @@ func ListCACertificates(ctx context.Context, r soap.RoundTripper, req *types.Lis type ListFilesInGuestBody struct { Req *types.ListFilesInGuest `xml:"urn:vim25 ListFilesInGuest,omitempty"` - Res *types.ListFilesInGuestResponse `xml:"urn:vim25 ListFilesInGuestResponse,omitempty"` + Res *types.ListFilesInGuestResponse `xml:"ListFilesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7125,7 +7845,7 @@ func ListFilesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListF type ListGuestAliasesBody struct { Req *types.ListGuestAliases `xml:"urn:vim25 ListGuestAliases,omitempty"` - Res *types.ListGuestAliasesResponse `xml:"urn:vim25 ListGuestAliasesResponse,omitempty"` + Res *types.ListGuestAliasesResponse `xml:"ListGuestAliasesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7145,7 +7865,7 @@ func ListGuestAliases(ctx context.Context, r soap.RoundTripper, req *types.ListG type ListGuestMappedAliasesBody struct { Req *types.ListGuestMappedAliases `xml:"urn:vim25 ListGuestMappedAliases,omitempty"` - Res *types.ListGuestMappedAliasesResponse `xml:"urn:vim25 ListGuestMappedAliasesResponse,omitempty"` + Res *types.ListGuestMappedAliasesResponse `xml:"ListGuestMappedAliasesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7165,7 +7885,7 @@ func ListGuestMappedAliases(ctx context.Context, r soap.RoundTripper, req *types type ListKeysBody struct { Req *types.ListKeys `xml:"urn:vim25 ListKeys,omitempty"` - Res *types.ListKeysResponse `xml:"urn:vim25 ListKeysResponse,omitempty"` + Res *types.ListKeysResponse `xml:"ListKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7183,16 +7903,36 @@ func ListKeys(ctx context.Context, r soap.RoundTripper, req *types.ListKeys) (*t return resBody.Res, nil } -type ListKmipServersBody struct { - Req *types.ListKmipServers `xml:"urn:vim25 ListKmipServers,omitempty"` - Res *types.ListKmipServersResponse `xml:"urn:vim25 ListKmipServersResponse,omitempty"` +type ListKmipServersBody struct { + Req *types.ListKmipServers `xml:"urn:vim25 ListKmipServers,omitempty"` + Res *types.ListKmipServersResponse `xml:"ListKmipServersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListKmipServersBody) Fault() *soap.Fault { return b.Fault_ } + +func ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKmipServers) (*types.ListKmipServersResponse, error) { + var reqBody, resBody ListKmipServersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListKmsClustersBody struct { + Req *types.ListKmsClusters `xml:"urn:vim25 ListKmsClusters,omitempty"` + Res *types.ListKmsClustersResponse `xml:"ListKmsClustersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *ListKmipServersBody) Fault() *soap.Fault { return b.Fault_ } +func (b *ListKmsClustersBody) Fault() *soap.Fault { return b.Fault_ } -func 
ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKmipServers) (*types.ListKmipServersResponse, error) { - var reqBody, resBody ListKmipServersBody +func ListKmsClusters(ctx context.Context, r soap.RoundTripper, req *types.ListKmsClusters) (*types.ListKmsClustersResponse, error) { + var reqBody, resBody ListKmsClustersBody reqBody.Req = req @@ -7205,7 +7945,7 @@ func ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKm type ListProcessesInGuestBody struct { Req *types.ListProcessesInGuest `xml:"urn:vim25 ListProcessesInGuest,omitempty"` - Res *types.ListProcessesInGuestResponse `xml:"urn:vim25 ListProcessesInGuestResponse,omitempty"` + Res *types.ListProcessesInGuestResponse `xml:"ListProcessesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7225,7 +7965,7 @@ func ListProcessesInGuest(ctx context.Context, r soap.RoundTripper, req *types.L type ListRegistryKeysInGuestBody struct { Req *types.ListRegistryKeysInGuest `xml:"urn:vim25 ListRegistryKeysInGuest,omitempty"` - Res *types.ListRegistryKeysInGuestResponse `xml:"urn:vim25 ListRegistryKeysInGuestResponse,omitempty"` + Res *types.ListRegistryKeysInGuestResponse `xml:"ListRegistryKeysInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7245,7 +7985,7 @@ func ListRegistryKeysInGuest(ctx context.Context, r soap.RoundTripper, req *type type ListRegistryValuesInGuestBody struct { Req *types.ListRegistryValuesInGuest `xml:"urn:vim25 ListRegistryValuesInGuest,omitempty"` - Res *types.ListRegistryValuesInGuestResponse `xml:"urn:vim25 ListRegistryValuesInGuestResponse,omitempty"` + Res *types.ListRegistryValuesInGuestResponse `xml:"ListRegistryValuesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7265,7 +8005,7 @@ func ListRegistryValuesInGuest(ctx context.Context, r soap.RoundTripper, req *ty type ListSmartCardTrustAnchorsBody struct { Req *types.ListSmartCardTrustAnchors `xml:"urn:vim25 ListSmartCardTrustAnchors,omitempty"` - Res *types.ListSmartCardTrustAnchorsResponse `xml:"urn:vim25 ListSmartCardTrustAnchorsResponse,omitempty"` + Res *types.ListSmartCardTrustAnchorsResponse `xml:"ListSmartCardTrustAnchorsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7285,7 +8025,7 @@ func ListSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *ty type ListTagsAttachedToVStorageObjectBody struct { Req *types.ListTagsAttachedToVStorageObject `xml:"urn:vim25 ListTagsAttachedToVStorageObject,omitempty"` - Res *types.ListTagsAttachedToVStorageObjectResponse `xml:"urn:vim25 ListTagsAttachedToVStorageObjectResponse,omitempty"` + Res *types.ListTagsAttachedToVStorageObjectResponse `xml:"ListTagsAttachedToVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7305,7 +8045,7 @@ func ListTagsAttachedToVStorageObject(ctx context.Context, r soap.RoundTripper, type ListVStorageObjectBody struct { Req *types.ListVStorageObject `xml:"urn:vim25 ListVStorageObject,omitempty"` - Res *types.ListVStorageObjectResponse `xml:"urn:vim25 ListVStorageObjectResponse,omitempty"` + Res *types.ListVStorageObjectResponse `xml:"ListVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7325,7 +8065,7 @@ func 
ListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.Lis type ListVStorageObjectsAttachedToTagBody struct { Req *types.ListVStorageObjectsAttachedToTag `xml:"urn:vim25 ListVStorageObjectsAttachedToTag,omitempty"` - Res *types.ListVStorageObjectsAttachedToTagResponse `xml:"urn:vim25 ListVStorageObjectsAttachedToTagResponse,omitempty"` + Res *types.ListVStorageObjectsAttachedToTagResponse `xml:"ListVStorageObjectsAttachedToTagResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7345,7 +8085,7 @@ func ListVStorageObjectsAttachedToTag(ctx context.Context, r soap.RoundTripper, type LogUserEventBody struct { Req *types.LogUserEvent `xml:"urn:vim25 LogUserEvent,omitempty"` - Res *types.LogUserEventResponse `xml:"urn:vim25 LogUserEventResponse,omitempty"` + Res *types.LogUserEventResponse `xml:"LogUserEventResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7365,7 +8105,7 @@ func LogUserEvent(ctx context.Context, r soap.RoundTripper, req *types.LogUserEv type LoginBody struct { Req *types.Login `xml:"urn:vim25 Login,omitempty"` - Res *types.LoginResponse `xml:"urn:vim25 LoginResponse,omitempty"` + Res *types.LoginResponse `xml:"LoginResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7385,7 +8125,7 @@ func Login(ctx context.Context, r soap.RoundTripper, req *types.Login) (*types.L type LoginBySSPIBody struct { Req *types.LoginBySSPI `xml:"urn:vim25 LoginBySSPI,omitempty"` - Res *types.LoginBySSPIResponse `xml:"urn:vim25 LoginBySSPIResponse,omitempty"` + Res *types.LoginBySSPIResponse `xml:"LoginBySSPIResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7405,7 +8145,7 @@ func LoginBySSPI(ctx context.Context, r soap.RoundTripper, req *types.LoginBySSP type LoginByTokenBody struct { Req *types.LoginByToken `xml:"urn:vim25 LoginByToken,omitempty"` - Res *types.LoginByTokenResponse `xml:"urn:vim25 LoginByTokenResponse,omitempty"` + Res *types.LoginByTokenResponse `xml:"LoginByTokenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7425,7 +8165,7 @@ func LoginByToken(ctx context.Context, r soap.RoundTripper, req *types.LoginByTo type LoginExtensionByCertificateBody struct { Req *types.LoginExtensionByCertificate `xml:"urn:vim25 LoginExtensionByCertificate,omitempty"` - Res *types.LoginExtensionByCertificateResponse `xml:"urn:vim25 LoginExtensionByCertificateResponse,omitempty"` + Res *types.LoginExtensionByCertificateResponse `xml:"LoginExtensionByCertificateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7445,7 +8185,7 @@ func LoginExtensionByCertificate(ctx context.Context, r soap.RoundTripper, req * type LoginExtensionBySubjectNameBody struct { Req *types.LoginExtensionBySubjectName `xml:"urn:vim25 LoginExtensionBySubjectName,omitempty"` - Res *types.LoginExtensionBySubjectNameResponse `xml:"urn:vim25 LoginExtensionBySubjectNameResponse,omitempty"` + Res *types.LoginExtensionBySubjectNameResponse `xml:"LoginExtensionBySubjectNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7465,7 +8205,7 @@ func LoginExtensionBySubjectName(ctx context.Context, r soap.RoundTripper, req * type LogoutBody struct { Req *types.Logout `xml:"urn:vim25 Logout,omitempty"` - Res 
*types.LogoutResponse `xml:"urn:vim25 LogoutResponse,omitempty"` + Res *types.LogoutResponse `xml:"LogoutResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7485,7 +8225,7 @@ func Logout(ctx context.Context, r soap.RoundTripper, req *types.Logout) (*types type LookupDvPortGroupBody struct { Req *types.LookupDvPortGroup `xml:"urn:vim25 LookupDvPortGroup,omitempty"` - Res *types.LookupDvPortGroupResponse `xml:"urn:vim25 LookupDvPortGroupResponse,omitempty"` + Res *types.LookupDvPortGroupResponse `xml:"LookupDvPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7505,7 +8245,7 @@ func LookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.Look type LookupVmOverheadMemoryBody struct { Req *types.LookupVmOverheadMemory `xml:"urn:vim25 LookupVmOverheadMemory,omitempty"` - Res *types.LookupVmOverheadMemoryResponse `xml:"urn:vim25 LookupVmOverheadMemoryResponse,omitempty"` + Res *types.LookupVmOverheadMemoryResponse `xml:"LookupVmOverheadMemoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7525,7 +8265,7 @@ func LookupVmOverheadMemory(ctx context.Context, r soap.RoundTripper, req *types type MakeDirectoryBody struct { Req *types.MakeDirectory `xml:"urn:vim25 MakeDirectory,omitempty"` - Res *types.MakeDirectoryResponse `xml:"urn:vim25 MakeDirectoryResponse,omitempty"` + Res *types.MakeDirectoryResponse `xml:"MakeDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7545,7 +8285,7 @@ func MakeDirectory(ctx context.Context, r soap.RoundTripper, req *types.MakeDire type MakeDirectoryInGuestBody struct { Req *types.MakeDirectoryInGuest `xml:"urn:vim25 MakeDirectoryInGuest,omitempty"` - Res *types.MakeDirectoryInGuestResponse `xml:"urn:vim25 MakeDirectoryInGuestResponse,omitempty"` + Res *types.MakeDirectoryInGuestResponse `xml:"MakeDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7565,7 +8305,7 @@ func MakeDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.M type MakePrimaryVM_TaskBody struct { Req *types.MakePrimaryVM_Task `xml:"urn:vim25 MakePrimaryVM_Task,omitempty"` - Res *types.MakePrimaryVM_TaskResponse `xml:"urn:vim25 MakePrimaryVM_TaskResponse,omitempty"` + Res *types.MakePrimaryVM_TaskResponse `xml:"MakePrimaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7585,7 +8325,7 @@ func MakePrimaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Mak type MarkAsLocal_TaskBody struct { Req *types.MarkAsLocal_Task `xml:"urn:vim25 MarkAsLocal_Task,omitempty"` - Res *types.MarkAsLocal_TaskResponse `xml:"urn:vim25 MarkAsLocal_TaskResponse,omitempty"` + Res *types.MarkAsLocal_TaskResponse `xml:"MarkAsLocal_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7605,7 +8345,7 @@ func MarkAsLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkA type MarkAsNonLocal_TaskBody struct { Req *types.MarkAsNonLocal_Task `xml:"urn:vim25 MarkAsNonLocal_Task,omitempty"` - Res *types.MarkAsNonLocal_TaskResponse `xml:"urn:vim25 MarkAsNonLocal_TaskResponse,omitempty"` + Res *types.MarkAsNonLocal_TaskResponse `xml:"MarkAsNonLocal_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7625,7 +8365,7 @@ func MarkAsNonLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.Ma type MarkAsNonSsd_TaskBody struct { Req *types.MarkAsNonSsd_Task `xml:"urn:vim25 MarkAsNonSsd_Task,omitempty"` - Res *types.MarkAsNonSsd_TaskResponse `xml:"urn:vim25 MarkAsNonSsd_TaskResponse,omitempty"` + Res *types.MarkAsNonSsd_TaskResponse `xml:"MarkAsNonSsd_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7645,7 +8385,7 @@ func MarkAsNonSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.Mark type MarkAsSsd_TaskBody struct { Req *types.MarkAsSsd_Task `xml:"urn:vim25 MarkAsSsd_Task,omitempty"` - Res *types.MarkAsSsd_TaskResponse `xml:"urn:vim25 MarkAsSsd_TaskResponse,omitempty"` + Res *types.MarkAsSsd_TaskResponse `xml:"MarkAsSsd_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7665,7 +8405,7 @@ func MarkAsSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsS type MarkAsTemplateBody struct { Req *types.MarkAsTemplate `xml:"urn:vim25 MarkAsTemplate,omitempty"` - Res *types.MarkAsTemplateResponse `xml:"urn:vim25 MarkAsTemplateResponse,omitempty"` + Res *types.MarkAsTemplateResponse `xml:"MarkAsTemplateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7685,7 +8425,7 @@ func MarkAsTemplate(ctx context.Context, r soap.RoundTripper, req *types.MarkAsT type MarkAsVirtualMachineBody struct { Req *types.MarkAsVirtualMachine `xml:"urn:vim25 MarkAsVirtualMachine,omitempty"` - Res *types.MarkAsVirtualMachineResponse `xml:"urn:vim25 MarkAsVirtualMachineResponse,omitempty"` + Res *types.MarkAsVirtualMachineResponse `xml:"MarkAsVirtualMachineResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7705,7 +8445,7 @@ func MarkAsVirtualMachine(ctx context.Context, r soap.RoundTripper, req *types.M type MarkDefaultBody struct { Req *types.MarkDefault `xml:"urn:vim25 MarkDefault,omitempty"` - Res *types.MarkDefaultResponse `xml:"urn:vim25 MarkDefaultResponse,omitempty"` + Res *types.MarkDefaultResponse `xml:"MarkDefaultResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7725,7 +8465,7 @@ func MarkDefault(ctx context.Context, r soap.RoundTripper, req *types.MarkDefaul type MarkForRemovalBody struct { Req *types.MarkForRemoval `xml:"urn:vim25 MarkForRemoval,omitempty"` - Res *types.MarkForRemovalResponse `xml:"urn:vim25 MarkForRemovalResponse,omitempty"` + Res *types.MarkForRemovalResponse `xml:"MarkForRemovalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7743,9 +8483,69 @@ func MarkForRemoval(ctx context.Context, r soap.RoundTripper, req *types.MarkFor return resBody.Res, nil } +type MarkPerenniallyReservedBody struct { + Req *types.MarkPerenniallyReserved `xml:"urn:vim25 MarkPerenniallyReserved,omitempty"` + Res *types.MarkPerenniallyReservedResponse `xml:"MarkPerenniallyReservedResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkPerenniallyReservedBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkPerenniallyReserved(ctx context.Context, r soap.RoundTripper, req *types.MarkPerenniallyReserved) (*types.MarkPerenniallyReservedResponse, error) { + var reqBody, 
resBody MarkPerenniallyReservedBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkPerenniallyReservedEx_TaskBody struct { + Req *types.MarkPerenniallyReservedEx_Task `xml:"urn:vim25 MarkPerenniallyReservedEx_Task,omitempty"` + Res *types.MarkPerenniallyReservedEx_TaskResponse `xml:"MarkPerenniallyReservedEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkPerenniallyReservedEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkPerenniallyReservedEx_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkPerenniallyReservedEx_Task) (*types.MarkPerenniallyReservedEx_TaskResponse, error) { + var reqBody, resBody MarkPerenniallyReservedEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkServiceProviderEntitiesBody struct { + Req *types.MarkServiceProviderEntities `xml:"urn:vim25 MarkServiceProviderEntities,omitempty"` + Res *types.MarkServiceProviderEntitiesResponse `xml:"MarkServiceProviderEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.MarkServiceProviderEntities) (*types.MarkServiceProviderEntitiesResponse, error) { + var reqBody, resBody MarkServiceProviderEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type MergeDvs_TaskBody struct { Req *types.MergeDvs_Task `xml:"urn:vim25 MergeDvs_Task,omitempty"` - Res *types.MergeDvs_TaskResponse `xml:"urn:vim25 MergeDvs_TaskResponse,omitempty"` + Res *types.MergeDvs_TaskResponse `xml:"MergeDvs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7765,7 +8565,7 @@ func MergeDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.MergeDvs type MergePermissionsBody struct { Req *types.MergePermissions `xml:"urn:vim25 MergePermissions,omitempty"` - Res *types.MergePermissionsResponse `xml:"urn:vim25 MergePermissionsResponse,omitempty"` + Res *types.MergePermissionsResponse `xml:"MergePermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7785,7 +8585,7 @@ func MergePermissions(ctx context.Context, r soap.RoundTripper, req *types.Merge type MigrateVM_TaskBody struct { Req *types.MigrateVM_Task `xml:"urn:vim25 MigrateVM_Task,omitempty"` - Res *types.MigrateVM_TaskResponse `xml:"urn:vim25 MigrateVM_TaskResponse,omitempty"` + Res *types.MigrateVM_TaskResponse `xml:"MigrateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7805,7 +8605,7 @@ func MigrateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Migrate type ModifyListViewBody struct { Req *types.ModifyListView `xml:"urn:vim25 ModifyListView,omitempty"` - Res *types.ModifyListViewResponse `xml:"urn:vim25 ModifyListViewResponse,omitempty"` + Res *types.ModifyListViewResponse `xml:"ModifyListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7825,7 +8625,7 @@ func 
ModifyListView(ctx context.Context, r soap.RoundTripper, req *types.ModifyL type MountToolsInstallerBody struct { Req *types.MountToolsInstaller `xml:"urn:vim25 MountToolsInstaller,omitempty"` - Res *types.MountToolsInstallerResponse `xml:"urn:vim25 MountToolsInstallerResponse,omitempty"` + Res *types.MountToolsInstallerResponse `xml:"MountToolsInstallerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7845,7 +8645,7 @@ func MountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.Mo type MountVffsVolumeBody struct { Req *types.MountVffsVolume `xml:"urn:vim25 MountVffsVolume,omitempty"` - Res *types.MountVffsVolumeResponse `xml:"urn:vim25 MountVffsVolumeResponse,omitempty"` + Res *types.MountVffsVolumeResponse `xml:"MountVffsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7865,7 +8665,7 @@ func MountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountV type MountVmfsVolumeBody struct { Req *types.MountVmfsVolume `xml:"urn:vim25 MountVmfsVolume,omitempty"` - Res *types.MountVmfsVolumeResponse `xml:"urn:vim25 MountVmfsVolumeResponse,omitempty"` + Res *types.MountVmfsVolumeResponse `xml:"MountVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7885,7 +8685,7 @@ func MountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountV type MountVmfsVolumeEx_TaskBody struct { Req *types.MountVmfsVolumeEx_Task `xml:"urn:vim25 MountVmfsVolumeEx_Task,omitempty"` - Res *types.MountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 MountVmfsVolumeEx_TaskResponse,omitempty"` + Res *types.MountVmfsVolumeEx_TaskResponse `xml:"MountVmfsVolumeEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7905,7 +8705,7 @@ func MountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types type MoveDVPort_TaskBody struct { Req *types.MoveDVPort_Task `xml:"urn:vim25 MoveDVPort_Task,omitempty"` - Res *types.MoveDVPort_TaskResponse `xml:"urn:vim25 MoveDVPort_TaskResponse,omitempty"` + Res *types.MoveDVPort_TaskResponse `xml:"MoveDVPort_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7925,7 +8725,7 @@ func MoveDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDV type MoveDatastoreFile_TaskBody struct { Req *types.MoveDatastoreFile_Task `xml:"urn:vim25 MoveDatastoreFile_Task,omitempty"` - Res *types.MoveDatastoreFile_TaskResponse `xml:"urn:vim25 MoveDatastoreFile_TaskResponse,omitempty"` + Res *types.MoveDatastoreFile_TaskResponse `xml:"MoveDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7945,7 +8745,7 @@ func MoveDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types type MoveDirectoryInGuestBody struct { Req *types.MoveDirectoryInGuest `xml:"urn:vim25 MoveDirectoryInGuest,omitempty"` - Res *types.MoveDirectoryInGuestResponse `xml:"urn:vim25 MoveDirectoryInGuestResponse,omitempty"` + Res *types.MoveDirectoryInGuestResponse `xml:"MoveDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7965,7 +8765,7 @@ func MoveDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.M type MoveFileInGuestBody struct { Req *types.MoveFileInGuest `xml:"urn:vim25 
MoveFileInGuest,omitempty"` - Res *types.MoveFileInGuestResponse `xml:"urn:vim25 MoveFileInGuestResponse,omitempty"` + Res *types.MoveFileInGuestResponse `xml:"MoveFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7985,7 +8785,7 @@ func MoveFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveFi type MoveHostInto_TaskBody struct { Req *types.MoveHostInto_Task `xml:"urn:vim25 MoveHostInto_Task,omitempty"` - Res *types.MoveHostInto_TaskResponse `xml:"urn:vim25 MoveHostInto_TaskResponse,omitempty"` + Res *types.MoveHostInto_TaskResponse `xml:"MoveHostInto_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8005,7 +8805,7 @@ func MoveHostInto_Task(ctx context.Context, r soap.RoundTripper, req *types.Move type MoveIntoFolder_TaskBody struct { Req *types.MoveIntoFolder_Task `xml:"urn:vim25 MoveIntoFolder_Task,omitempty"` - Res *types.MoveIntoFolder_TaskResponse `xml:"urn:vim25 MoveIntoFolder_TaskResponse,omitempty"` + Res *types.MoveIntoFolder_TaskResponse `xml:"MoveIntoFolder_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8025,7 +8825,7 @@ func MoveIntoFolder_Task(ctx context.Context, r soap.RoundTripper, req *types.Mo type MoveIntoResourcePoolBody struct { Req *types.MoveIntoResourcePool `xml:"urn:vim25 MoveIntoResourcePool,omitempty"` - Res *types.MoveIntoResourcePoolResponse `xml:"urn:vim25 MoveIntoResourcePoolResponse,omitempty"` + Res *types.MoveIntoResourcePoolResponse `xml:"MoveIntoResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8045,7 +8845,7 @@ func MoveIntoResourcePool(ctx context.Context, r soap.RoundTripper, req *types.M type MoveInto_TaskBody struct { Req *types.MoveInto_Task `xml:"urn:vim25 MoveInto_Task,omitempty"` - Res *types.MoveInto_TaskResponse `xml:"urn:vim25 MoveInto_TaskResponse,omitempty"` + Res *types.MoveInto_TaskResponse `xml:"MoveInto_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8065,7 +8865,7 @@ func MoveInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveInto type MoveVirtualDisk_TaskBody struct { Req *types.MoveVirtualDisk_Task `xml:"urn:vim25 MoveVirtualDisk_Task,omitempty"` - Res *types.MoveVirtualDisk_TaskResponse `xml:"urn:vim25 MoveVirtualDisk_TaskResponse,omitempty"` + Res *types.MoveVirtualDisk_TaskResponse `xml:"MoveVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8085,7 +8885,7 @@ func MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.M type OpenInventoryViewFolderBody struct { Req *types.OpenInventoryViewFolder `xml:"urn:vim25 OpenInventoryViewFolder,omitempty"` - Res *types.OpenInventoryViewFolderResponse `xml:"urn:vim25 OpenInventoryViewFolderResponse,omitempty"` + Res *types.OpenInventoryViewFolderResponse `xml:"OpenInventoryViewFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8105,7 +8905,7 @@ func OpenInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *type type OverwriteCustomizationSpecBody struct { Req *types.OverwriteCustomizationSpec `xml:"urn:vim25 OverwriteCustomizationSpec,omitempty"` - Res *types.OverwriteCustomizationSpecResponse `xml:"urn:vim25 
OverwriteCustomizationSpecResponse,omitempty"` + Res *types.OverwriteCustomizationSpecResponse `xml:"OverwriteCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8125,7 +8925,7 @@ func OverwriteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *t type ParseDescriptorBody struct { Req *types.ParseDescriptor `xml:"urn:vim25 ParseDescriptor,omitempty"` - Res *types.ParseDescriptorResponse `xml:"urn:vim25 ParseDescriptorResponse,omitempty"` + Res *types.ParseDescriptorResponse `xml:"ParseDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8145,7 +8945,7 @@ func ParseDescriptor(ctx context.Context, r soap.RoundTripper, req *types.ParseD type PerformDvsProductSpecOperation_TaskBody struct { Req *types.PerformDvsProductSpecOperation_Task `xml:"urn:vim25 PerformDvsProductSpecOperation_Task,omitempty"` - Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"urn:vim25 PerformDvsProductSpecOperation_TaskResponse,omitempty"` + Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"PerformDvsProductSpecOperation_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8165,7 +8965,7 @@ func PerformDvsProductSpecOperation_Task(ctx context.Context, r soap.RoundTrippe type PerformVsanUpgradePreflightCheckBody struct { Req *types.PerformVsanUpgradePreflightCheck `xml:"urn:vim25 PerformVsanUpgradePreflightCheck,omitempty"` - Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"urn:vim25 PerformVsanUpgradePreflightCheckResponse,omitempty"` + Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"PerformVsanUpgradePreflightCheckResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8185,7 +8985,7 @@ func PerformVsanUpgradePreflightCheck(ctx context.Context, r soap.RoundTripper, type PerformVsanUpgrade_TaskBody struct { Req *types.PerformVsanUpgrade_Task `xml:"urn:vim25 PerformVsanUpgrade_Task,omitempty"` - Res *types.PerformVsanUpgrade_TaskResponse `xml:"urn:vim25 PerformVsanUpgrade_TaskResponse,omitempty"` + Res *types.PerformVsanUpgrade_TaskResponse `xml:"PerformVsanUpgrade_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8205,7 +9005,7 @@ func PerformVsanUpgrade_Task(ctx context.Context, r soap.RoundTripper, req *type type PlaceVmBody struct { Req *types.PlaceVm `xml:"urn:vim25 PlaceVm,omitempty"` - Res *types.PlaceVmResponse `xml:"urn:vim25 PlaceVmResponse,omitempty"` + Res *types.PlaceVmResponse `xml:"PlaceVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8225,7 +9025,7 @@ func PlaceVm(ctx context.Context, r soap.RoundTripper, req *types.PlaceVm) (*typ type PostEventBody struct { Req *types.PostEvent `xml:"urn:vim25 PostEvent,omitempty"` - Res *types.PostEventResponse `xml:"urn:vim25 PostEventResponse,omitempty"` + Res *types.PostEventResponse `xml:"PostEventResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8245,7 +9045,7 @@ func PostEvent(ctx context.Context, r soap.RoundTripper, req *types.PostEvent) ( type PostHealthUpdatesBody struct { Req *types.PostHealthUpdates `xml:"urn:vim25 PostHealthUpdates,omitempty"` - Res *types.PostHealthUpdatesResponse `xml:"urn:vim25 PostHealthUpdatesResponse,omitempty"` + Res 
*types.PostHealthUpdatesResponse `xml:"PostHealthUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8265,7 +9065,7 @@ func PostHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.Post type PowerDownHostToStandBy_TaskBody struct { Req *types.PowerDownHostToStandBy_Task `xml:"urn:vim25 PowerDownHostToStandBy_Task,omitempty"` - Res *types.PowerDownHostToStandBy_TaskResponse `xml:"urn:vim25 PowerDownHostToStandBy_TaskResponse,omitempty"` + Res *types.PowerDownHostToStandBy_TaskResponse `xml:"PowerDownHostToStandBy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8285,7 +9085,7 @@ func PowerDownHostToStandBy_Task(ctx context.Context, r soap.RoundTripper, req * type PowerOffVApp_TaskBody struct { Req *types.PowerOffVApp_Task `xml:"urn:vim25 PowerOffVApp_Task,omitempty"` - Res *types.PowerOffVApp_TaskResponse `xml:"urn:vim25 PowerOffVApp_TaskResponse,omitempty"` + Res *types.PowerOffVApp_TaskResponse `xml:"PowerOffVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8305,7 +9105,7 @@ func PowerOffVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Powe type PowerOffVM_TaskBody struct { Req *types.PowerOffVM_Task `xml:"urn:vim25 PowerOffVM_Task,omitempty"` - Res *types.PowerOffVM_TaskResponse `xml:"urn:vim25 PowerOffVM_TaskResponse,omitempty"` + Res *types.PowerOffVM_TaskResponse `xml:"PowerOffVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8325,7 +9125,7 @@ func PowerOffVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerO type PowerOnMultiVM_TaskBody struct { Req *types.PowerOnMultiVM_Task `xml:"urn:vim25 PowerOnMultiVM_Task,omitempty"` - Res *types.PowerOnMultiVM_TaskResponse `xml:"urn:vim25 PowerOnMultiVM_TaskResponse,omitempty"` + Res *types.PowerOnMultiVM_TaskResponse `xml:"PowerOnMultiVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8345,7 +9145,7 @@ func PowerOnMultiVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Po type PowerOnVApp_TaskBody struct { Req *types.PowerOnVApp_Task `xml:"urn:vim25 PowerOnVApp_Task,omitempty"` - Res *types.PowerOnVApp_TaskResponse `xml:"urn:vim25 PowerOnVApp_TaskResponse,omitempty"` + Res *types.PowerOnVApp_TaskResponse `xml:"PowerOnVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8365,7 +9165,7 @@ func PowerOnVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Power type PowerOnVM_TaskBody struct { Req *types.PowerOnVM_Task `xml:"urn:vim25 PowerOnVM_Task,omitempty"` - Res *types.PowerOnVM_TaskResponse `xml:"urn:vim25 PowerOnVM_TaskResponse,omitempty"` + Res *types.PowerOnVM_TaskResponse `xml:"PowerOnVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8385,7 +9185,7 @@ func PowerOnVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOn type PowerUpHostFromStandBy_TaskBody struct { Req *types.PowerUpHostFromStandBy_Task `xml:"urn:vim25 PowerUpHostFromStandBy_Task,omitempty"` - Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"urn:vim25 PowerUpHostFromStandBy_TaskResponse,omitempty"` + Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"PowerUpHostFromStandBy_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8405,7 +9205,7 @@ func PowerUpHostFromStandBy_Task(ctx context.Context, r soap.RoundTripper, req * type PrepareCryptoBody struct { Req *types.PrepareCrypto `xml:"urn:vim25 PrepareCrypto,omitempty"` - Res *types.PrepareCryptoResponse `xml:"urn:vim25 PrepareCryptoResponse,omitempty"` + Res *types.PrepareCryptoResponse `xml:"PrepareCryptoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8425,7 +9225,7 @@ func PrepareCrypto(ctx context.Context, r soap.RoundTripper, req *types.PrepareC type PromoteDisks_TaskBody struct { Req *types.PromoteDisks_Task `xml:"urn:vim25 PromoteDisks_Task,omitempty"` - Res *types.PromoteDisks_TaskResponse `xml:"urn:vim25 PromoteDisks_TaskResponse,omitempty"` + Res *types.PromoteDisks_TaskResponse `xml:"PromoteDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8445,7 +9245,7 @@ func PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.Prom type PutUsbScanCodesBody struct { Req *types.PutUsbScanCodes `xml:"urn:vim25 PutUsbScanCodes,omitempty"` - Res *types.PutUsbScanCodesResponse `xml:"urn:vim25 PutUsbScanCodesResponse,omitempty"` + Res *types.PutUsbScanCodesResponse `xml:"PutUsbScanCodesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8465,7 +9265,7 @@ func PutUsbScanCodes(ctx context.Context, r soap.RoundTripper, req *types.PutUsb type QueryAnswerFileStatusBody struct { Req *types.QueryAnswerFileStatus `xml:"urn:vim25 QueryAnswerFileStatus,omitempty"` - Res *types.QueryAnswerFileStatusResponse `xml:"urn:vim25 QueryAnswerFileStatusResponse,omitempty"` + Res *types.QueryAnswerFileStatusResponse `xml:"QueryAnswerFileStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8485,7 +9285,7 @@ func QueryAnswerFileStatus(ctx context.Context, r soap.RoundTripper, req *types. type QueryAssignedLicensesBody struct { Req *types.QueryAssignedLicenses `xml:"urn:vim25 QueryAssignedLicenses,omitempty"` - Res *types.QueryAssignedLicensesResponse `xml:"urn:vim25 QueryAssignedLicensesResponse,omitempty"` + Res *types.QueryAssignedLicensesResponse `xml:"QueryAssignedLicensesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8505,7 +9305,7 @@ func QueryAssignedLicenses(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryAvailableDisksForVmfsBody struct { Req *types.QueryAvailableDisksForVmfs `xml:"urn:vim25 QueryAvailableDisksForVmfs,omitempty"` - Res *types.QueryAvailableDisksForVmfsResponse `xml:"urn:vim25 QueryAvailableDisksForVmfsResponse,omitempty"` + Res *types.QueryAvailableDisksForVmfsResponse `xml:"QueryAvailableDisksForVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8525,7 +9325,7 @@ func QueryAvailableDisksForVmfs(ctx context.Context, r soap.RoundTripper, req *t type QueryAvailableDvsSpecBody struct { Req *types.QueryAvailableDvsSpec `xml:"urn:vim25 QueryAvailableDvsSpec,omitempty"` - Res *types.QueryAvailableDvsSpecResponse `xml:"urn:vim25 QueryAvailableDvsSpecResponse,omitempty"` + Res *types.QueryAvailableDvsSpecResponse `xml:"QueryAvailableDvsSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8545,7 +9345,7 @@ func QueryAvailableDvsSpec(ctx context.Context, r soap.RoundTripper, req *types. type QueryAvailablePartitionBody struct { Req *types.QueryAvailablePartition `xml:"urn:vim25 QueryAvailablePartition,omitempty"` - Res *types.QueryAvailablePartitionResponse `xml:"urn:vim25 QueryAvailablePartitionResponse,omitempty"` + Res *types.QueryAvailablePartitionResponse `xml:"QueryAvailablePartitionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8565,7 +9365,7 @@ func QueryAvailablePartition(ctx context.Context, r soap.RoundTripper, req *type type QueryAvailablePerfMetricBody struct { Req *types.QueryAvailablePerfMetric `xml:"urn:vim25 QueryAvailablePerfMetric,omitempty"` - Res *types.QueryAvailablePerfMetricResponse `xml:"urn:vim25 QueryAvailablePerfMetricResponse,omitempty"` + Res *types.QueryAvailablePerfMetricResponse `xml:"QueryAvailablePerfMetricResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8585,7 +9385,7 @@ func QueryAvailablePerfMetric(ctx context.Context, r soap.RoundTripper, req *typ type QueryAvailableSsdsBody struct { Req *types.QueryAvailableSsds `xml:"urn:vim25 QueryAvailableSsds,omitempty"` - Res *types.QueryAvailableSsdsResponse `xml:"urn:vim25 QueryAvailableSsdsResponse,omitempty"` + Res *types.QueryAvailableSsdsResponse `xml:"QueryAvailableSsdsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8605,7 +9405,7 @@ func QueryAvailableSsds(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryAvailableTimeZonesBody struct { Req *types.QueryAvailableTimeZones `xml:"urn:vim25 QueryAvailableTimeZones,omitempty"` - Res *types.QueryAvailableTimeZonesResponse `xml:"urn:vim25 QueryAvailableTimeZonesResponse,omitempty"` + Res *types.QueryAvailableTimeZonesResponse `xml:"QueryAvailableTimeZonesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8625,7 +9425,7 @@ func QueryAvailableTimeZones(ctx context.Context, r soap.RoundTripper, req *type type QueryBootDevicesBody struct { Req *types.QueryBootDevices `xml:"urn:vim25 QueryBootDevices,omitempty"` - Res *types.QueryBootDevicesResponse `xml:"urn:vim25 QueryBootDevicesResponse,omitempty"` + Res *types.QueryBootDevicesResponse `xml:"QueryBootDevicesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8645,7 +9445,7 @@ func QueryBootDevices(ctx context.Context, r soap.RoundTripper, req 
*types.Query
 type QueryBoundVnicsBody struct {
 	Req *types.QueryBoundVnics `xml:"urn:vim25 QueryBoundVnics,omitempty"`
-	Res *types.QueryBoundVnicsResponse `xml:"urn:vim25 QueryBoundVnicsResponse,omitempty"`
+	Res *types.QueryBoundVnicsResponse `xml:"QueryBoundVnicsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8665,7 +9465,7 @@ func QueryBoundVnics(ctx context.Context, r soap.RoundTripper, req *types.QueryB
 type QueryCandidateNicsBody struct {
 	Req *types.QueryCandidateNics `xml:"urn:vim25 QueryCandidateNics,omitempty"`
-	Res *types.QueryCandidateNicsResponse `xml:"urn:vim25 QueryCandidateNicsResponse,omitempty"`
+	Res *types.QueryCandidateNicsResponse `xml:"QueryCandidateNicsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8685,7 +9485,7 @@ func QueryCandidateNics(ctx context.Context, r soap.RoundTripper, req *types.Que
 type QueryChangedDiskAreasBody struct {
 	Req *types.QueryChangedDiskAreas `xml:"urn:vim25 QueryChangedDiskAreas,omitempty"`
-	Res *types.QueryChangedDiskAreasResponse `xml:"urn:vim25 QueryChangedDiskAreasResponse,omitempty"`
+	Res *types.QueryChangedDiskAreasResponse `xml:"QueryChangedDiskAreasResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8705,7 +9505,7 @@ func QueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.
 type QueryCmmdsBody struct {
 	Req *types.QueryCmmds `xml:"urn:vim25 QueryCmmds,omitempty"`
-	Res *types.QueryCmmdsResponse `xml:"urn:vim25 QueryCmmdsResponse,omitempty"`
+	Res *types.QueryCmmdsResponse `xml:"QueryCmmdsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8725,7 +9525,7 @@ func QueryCmmds(ctx context.Context, r soap.RoundTripper, req *types.QueryCmmds)
 type QueryCompatibleHostForExistingDvsBody struct {
 	Req *types.QueryCompatibleHostForExistingDvs `xml:"urn:vim25 QueryCompatibleHostForExistingDvs,omitempty"`
-	Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"urn:vim25 QueryCompatibleHostForExistingDvsResponse,omitempty"`
+	Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"QueryCompatibleHostForExistingDvsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8745,7 +9545,7 @@ func QueryCompatibleHostForExistingDvs(ctx context.Context, r soap.RoundTripper,
 type QueryCompatibleHostForNewDvsBody struct {
 	Req *types.QueryCompatibleHostForNewDvs `xml:"urn:vim25 QueryCompatibleHostForNewDvs,omitempty"`
-	Res *types.QueryCompatibleHostForNewDvsResponse `xml:"urn:vim25 QueryCompatibleHostForNewDvsResponse,omitempty"`
+	Res *types.QueryCompatibleHostForNewDvsResponse `xml:"QueryCompatibleHostForNewDvsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8763,9 +9563,29 @@ func QueryCompatibleHostForNewDvs(ctx context.Context, r soap.RoundTripper, req
 	return resBody.Res, nil
 }
+type QueryCompatibleVmnicsFromHostsBody struct {
+	Req *types.QueryCompatibleVmnicsFromHosts `xml:"urn:vim25 QueryCompatibleVmnicsFromHosts,omitempty"`
+	Res *types.QueryCompatibleVmnicsFromHostsResponse `xml:"QueryCompatibleVmnicsFromHostsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCompatibleVmnicsFromHostsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCompatibleVmnicsFromHosts(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleVmnicsFromHosts) (*types.QueryCompatibleVmnicsFromHostsResponse, error) {
+	var reqBody, resBody QueryCompatibleVmnicsFromHostsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type QueryComplianceStatusBody struct {
 	Req *types.QueryComplianceStatus `xml:"urn:vim25 QueryComplianceStatus,omitempty"`
-	Res *types.QueryComplianceStatusResponse `xml:"urn:vim25 QueryComplianceStatusResponse,omitempty"`
+	Res *types.QueryComplianceStatusResponse `xml:"QueryComplianceStatusResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8785,7 +9605,7 @@ func QueryComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.
 type QueryConfigOptionBody struct {
 	Req *types.QueryConfigOption `xml:"urn:vim25 QueryConfigOption,omitempty"`
-	Res *types.QueryConfigOptionResponse `xml:"urn:vim25 QueryConfigOptionResponse,omitempty"`
+	Res *types.QueryConfigOptionResponse `xml:"QueryConfigOptionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8805,7 +9625,7 @@ func QueryConfigOption(ctx context.Context, r soap.RoundTripper, req *types.Quer
 type QueryConfigOptionDescriptorBody struct {
 	Req *types.QueryConfigOptionDescriptor `xml:"urn:vim25 QueryConfigOptionDescriptor,omitempty"`
-	Res *types.QueryConfigOptionDescriptorResponse `xml:"urn:vim25 QueryConfigOptionDescriptorResponse,omitempty"`
+	Res *types.QueryConfigOptionDescriptorResponse `xml:"QueryConfigOptionDescriptorResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8825,7 +9645,7 @@ func QueryConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req *
 type QueryConfigOptionExBody struct {
 	Req *types.QueryConfigOptionEx `xml:"urn:vim25 QueryConfigOptionEx,omitempty"`
-	Res *types.QueryConfigOptionExResponse `xml:"urn:vim25 QueryConfigOptionExResponse,omitempty"`
+	Res *types.QueryConfigOptionExResponse `xml:"QueryConfigOptionExResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8845,7 +9665,7 @@ func QueryConfigOptionEx(ctx context.Context, r soap.RoundTripper, req *types.Qu
 type QueryConfigTargetBody struct {
 	Req *types.QueryConfigTarget `xml:"urn:vim25 QueryConfigTarget,omitempty"`
-	Res *types.QueryConfigTargetResponse `xml:"urn:vim25 QueryConfigTargetResponse,omitempty"`
+	Res *types.QueryConfigTargetResponse `xml:"QueryConfigTargetResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8865,7 +9685,7 @@ func QueryConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.Quer
 type QueryConfiguredModuleOptionStringBody struct {
 	Req *types.QueryConfiguredModuleOptionString `xml:"urn:vim25 QueryConfiguredModuleOptionString,omitempty"`
-	Res *types.QueryConfiguredModuleOptionStringResponse `xml:"urn:vim25 QueryConfiguredModuleOptionStringResponse,omitempty"`
+	Res *types.QueryConfiguredModuleOptionStringResponse `xml:"QueryConfiguredModuleOptionStringResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8885,7 +9705,7 @@ func QueryConfiguredModuleOptionString(ctx context.Context, r soap.RoundTripper,
 type QueryConnectionInfoBody struct {
 	Req *types.QueryConnectionInfo `xml:"urn:vim25 QueryConnectionInfo,omitempty"`
-	Res *types.QueryConnectionInfoResponse `xml:"urn:vim25 QueryConnectionInfoResponse,omitempty"`
+	Res *types.QueryConnectionInfoResponse `xml:"QueryConnectionInfoResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8905,7 +9725,7 @@ func QueryConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.Qu
 type QueryConnectionInfoViaSpecBody struct {
 	Req *types.QueryConnectionInfoViaSpec `xml:"urn:vim25 QueryConnectionInfoViaSpec,omitempty"`
-	Res *types.QueryConnectionInfoViaSpecResponse `xml:"urn:vim25 QueryConnectionInfoViaSpecResponse,omitempty"`
+	Res *types.QueryConnectionInfoViaSpecResponse `xml:"QueryConnectionInfoViaSpecResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8923,9 +9743,49 @@ func QueryConnectionInfoViaSpec(ctx context.Context, r soap.RoundTripper, req *t
 	return resBody.Res, nil
 }
+type QueryConnectionsBody struct {
+	Req *types.QueryConnections `xml:"urn:vim25 QueryConnections,omitempty"`
+	Res *types.QueryConnectionsResponse `xml:"QueryConnectionsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryConnectionsBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryConnections(ctx context.Context, r soap.RoundTripper, req *types.QueryConnections) (*types.QueryConnectionsResponse, error) {
+	var reqBody, resBody QueryConnectionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type QueryCryptoKeyStatusBody struct {
+	Req *types.QueryCryptoKeyStatus `xml:"urn:vim25 QueryCryptoKeyStatus,omitempty"`
+	Res *types.QueryCryptoKeyStatusResponse `xml:"QueryCryptoKeyStatusResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *QueryCryptoKeyStatusBody) Fault() *soap.Fault { return b.Fault_ }
+
+func QueryCryptoKeyStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryCryptoKeyStatus) (*types.QueryCryptoKeyStatusResponse, error) {
+	var reqBody, resBody QueryCryptoKeyStatusBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type QueryDatastorePerformanceSummaryBody struct {
 	Req *types.QueryDatastorePerformanceSummary `xml:"urn:vim25 QueryDatastorePerformanceSummary,omitempty"`
-	Res *types.QueryDatastorePerformanceSummaryResponse `xml:"urn:vim25 QueryDatastorePerformanceSummaryResponse,omitempty"`
+	Res *types.QueryDatastorePerformanceSummaryResponse `xml:"QueryDatastorePerformanceSummaryResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8945,7 +9805,7 @@ func QueryDatastorePerformanceSummary(ctx context.Context, r soap.RoundTripper,
 type QueryDateTimeBody struct {
 	Req *types.QueryDateTime `xml:"urn:vim25 QueryDateTime,omitempty"`
-	Res *types.QueryDateTimeResponse `xml:"urn:vim25 QueryDateTimeResponse,omitempty"`
+	Res *types.QueryDateTimeResponse `xml:"QueryDateTimeResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
@@ -8965,7 +9825,7 @@ func QueryDateTime(ctx context.Context, r soap.RoundTripper, req *types.QueryDat
 type QueryDescriptionsBody struct {
 	Req *types.QueryDescriptions `xml:"urn:vim25 QueryDescriptions,omitempty"`
-	Res *types.QueryDescriptionsResponse `xml:"urn:vim25 
QueryDescriptionsResponse,omitempty"` + Res *types.QueryDescriptionsResponse `xml:"QueryDescriptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8985,7 +9845,7 @@ func QueryDescriptions(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryDisksForVsanBody struct { Req *types.QueryDisksForVsan `xml:"urn:vim25 QueryDisksForVsan,omitempty"` - Res *types.QueryDisksForVsanResponse `xml:"urn:vim25 QueryDisksForVsanResponse,omitempty"` + Res *types.QueryDisksForVsanResponse `xml:"QueryDisksForVsanResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9005,7 +9865,7 @@ func QueryDisksForVsan(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryDisksUsingFilterBody struct { Req *types.QueryDisksUsingFilter `xml:"urn:vim25 QueryDisksUsingFilter,omitempty"` - Res *types.QueryDisksUsingFilterResponse `xml:"urn:vim25 QueryDisksUsingFilterResponse,omitempty"` + Res *types.QueryDisksUsingFilterResponse `xml:"QueryDisksUsingFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9025,7 +9885,7 @@ func QueryDisksUsingFilter(ctx context.Context, r soap.RoundTripper, req *types. type QueryDvsByUuidBody struct { Req *types.QueryDvsByUuid `xml:"urn:vim25 QueryDvsByUuid,omitempty"` - Res *types.QueryDvsByUuidResponse `xml:"urn:vim25 QueryDvsByUuidResponse,omitempty"` + Res *types.QueryDvsByUuidResponse `xml:"QueryDvsByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9045,7 +9905,7 @@ func QueryDvsByUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryDv type QueryDvsCheckCompatibilityBody struct { Req *types.QueryDvsCheckCompatibility `xml:"urn:vim25 QueryDvsCheckCompatibility,omitempty"` - Res *types.QueryDvsCheckCompatibilityResponse `xml:"urn:vim25 QueryDvsCheckCompatibilityResponse,omitempty"` + Res *types.QueryDvsCheckCompatibilityResponse `xml:"QueryDvsCheckCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9065,7 +9925,7 @@ func QueryDvsCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *t type QueryDvsCompatibleHostSpecBody struct { Req *types.QueryDvsCompatibleHostSpec `xml:"urn:vim25 QueryDvsCompatibleHostSpec,omitempty"` - Res *types.QueryDvsCompatibleHostSpecResponse `xml:"urn:vim25 QueryDvsCompatibleHostSpecResponse,omitempty"` + Res *types.QueryDvsCompatibleHostSpecResponse `xml:"QueryDvsCompatibleHostSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9085,7 +9945,7 @@ func QueryDvsCompatibleHostSpec(ctx context.Context, r soap.RoundTripper, req *t type QueryDvsConfigTargetBody struct { Req *types.QueryDvsConfigTarget `xml:"urn:vim25 QueryDvsConfigTarget,omitempty"` - Res *types.QueryDvsConfigTargetResponse `xml:"urn:vim25 QueryDvsConfigTargetResponse,omitempty"` + Res *types.QueryDvsConfigTargetResponse `xml:"QueryDvsConfigTargetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9105,7 +9965,7 @@ func QueryDvsConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryDvsFeatureCapabilityBody struct { Req *types.QueryDvsFeatureCapability `xml:"urn:vim25 QueryDvsFeatureCapability,omitempty"` - Res *types.QueryDvsFeatureCapabilityResponse `xml:"urn:vim25 
QueryDvsFeatureCapabilityResponse,omitempty"` + Res *types.QueryDvsFeatureCapabilityResponse `xml:"QueryDvsFeatureCapabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9125,7 +9985,7 @@ func QueryDvsFeatureCapability(ctx context.Context, r soap.RoundTripper, req *ty type QueryEventsBody struct { Req *types.QueryEvents `xml:"urn:vim25 QueryEvents,omitempty"` - Res *types.QueryEventsResponse `xml:"urn:vim25 QueryEventsResponse,omitempty"` + Res *types.QueryEventsResponse `xml:"QueryEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9145,7 +10005,7 @@ func QueryEvents(ctx context.Context, r soap.RoundTripper, req *types.QueryEvent type QueryExpressionMetadataBody struct { Req *types.QueryExpressionMetadata `xml:"urn:vim25 QueryExpressionMetadata,omitempty"` - Res *types.QueryExpressionMetadataResponse `xml:"urn:vim25 QueryExpressionMetadataResponse,omitempty"` + Res *types.QueryExpressionMetadataResponse `xml:"QueryExpressionMetadataResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9165,7 +10025,7 @@ func QueryExpressionMetadata(ctx context.Context, r soap.RoundTripper, req *type type QueryExtensionIpAllocationUsageBody struct { Req *types.QueryExtensionIpAllocationUsage `xml:"urn:vim25 QueryExtensionIpAllocationUsage,omitempty"` - Res *types.QueryExtensionIpAllocationUsageResponse `xml:"urn:vim25 QueryExtensionIpAllocationUsageResponse,omitempty"` + Res *types.QueryExtensionIpAllocationUsageResponse `xml:"QueryExtensionIpAllocationUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9185,7 +10045,7 @@ func QueryExtensionIpAllocationUsage(ctx context.Context, r soap.RoundTripper, r type QueryFaultToleranceCompatibilityBody struct { Req *types.QueryFaultToleranceCompatibility `xml:"urn:vim25 QueryFaultToleranceCompatibility,omitempty"` - Res *types.QueryFaultToleranceCompatibilityResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityResponse,omitempty"` + Res *types.QueryFaultToleranceCompatibilityResponse `xml:"QueryFaultToleranceCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9205,7 +10065,7 @@ func QueryFaultToleranceCompatibility(ctx context.Context, r soap.RoundTripper, type QueryFaultToleranceCompatibilityExBody struct { Req *types.QueryFaultToleranceCompatibilityEx `xml:"urn:vim25 QueryFaultToleranceCompatibilityEx,omitempty"` - Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityExResponse,omitempty"` + Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"QueryFaultToleranceCompatibilityExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9225,7 +10085,7 @@ func QueryFaultToleranceCompatibilityEx(ctx context.Context, r soap.RoundTripper type QueryFilterEntitiesBody struct { Req *types.QueryFilterEntities `xml:"urn:vim25 QueryFilterEntities,omitempty"` - Res *types.QueryFilterEntitiesResponse `xml:"urn:vim25 QueryFilterEntitiesResponse,omitempty"` + Res *types.QueryFilterEntitiesResponse `xml:"QueryFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9245,7 +10105,7 @@ func QueryFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.Qu type 
QueryFilterInfoIdsBody struct { Req *types.QueryFilterInfoIds `xml:"urn:vim25 QueryFilterInfoIds,omitempty"` - Res *types.QueryFilterInfoIdsResponse `xml:"urn:vim25 QueryFilterInfoIdsResponse,omitempty"` + Res *types.QueryFilterInfoIdsResponse `xml:"QueryFilterInfoIdsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9265,7 +10125,7 @@ func QueryFilterInfoIds(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryFilterListBody struct { Req *types.QueryFilterList `xml:"urn:vim25 QueryFilterList,omitempty"` - Res *types.QueryFilterListResponse `xml:"urn:vim25 QueryFilterListResponse,omitempty"` + Res *types.QueryFilterListResponse `xml:"QueryFilterListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9285,7 +10145,7 @@ func QueryFilterList(ctx context.Context, r soap.RoundTripper, req *types.QueryF type QueryFilterNameBody struct { Req *types.QueryFilterName `xml:"urn:vim25 QueryFilterName,omitempty"` - Res *types.QueryFilterNameResponse `xml:"urn:vim25 QueryFilterNameResponse,omitempty"` + Res *types.QueryFilterNameResponse `xml:"QueryFilterNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9305,7 +10165,7 @@ func QueryFilterName(ctx context.Context, r soap.RoundTripper, req *types.QueryF type QueryFirmwareConfigUploadURLBody struct { Req *types.QueryFirmwareConfigUploadURL `xml:"urn:vim25 QueryFirmwareConfigUploadURL,omitempty"` - Res *types.QueryFirmwareConfigUploadURLResponse `xml:"urn:vim25 QueryFirmwareConfigUploadURLResponse,omitempty"` + Res *types.QueryFirmwareConfigUploadURLResponse `xml:"QueryFirmwareConfigUploadURLResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9325,7 +10185,7 @@ func QueryFirmwareConfigUploadURL(ctx context.Context, r soap.RoundTripper, req type QueryHealthUpdateInfosBody struct { Req *types.QueryHealthUpdateInfos `xml:"urn:vim25 QueryHealthUpdateInfos,omitempty"` - Res *types.QueryHealthUpdateInfosResponse `xml:"urn:vim25 QueryHealthUpdateInfosResponse,omitempty"` + Res *types.QueryHealthUpdateInfosResponse `xml:"QueryHealthUpdateInfosResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9345,7 +10205,7 @@ func QueryHealthUpdateInfos(ctx context.Context, r soap.RoundTripper, req *types type QueryHealthUpdatesBody struct { Req *types.QueryHealthUpdates `xml:"urn:vim25 QueryHealthUpdates,omitempty"` - Res *types.QueryHealthUpdatesResponse `xml:"urn:vim25 QueryHealthUpdatesResponse,omitempty"` + Res *types.QueryHealthUpdatesResponse `xml:"QueryHealthUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9365,7 +10225,7 @@ func QueryHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryHostConnectionInfoBody struct { Req *types.QueryHostConnectionInfo `xml:"urn:vim25 QueryHostConnectionInfo,omitempty"` - Res *types.QueryHostConnectionInfoResponse `xml:"urn:vim25 QueryHostConnectionInfoResponse,omitempty"` + Res *types.QueryHostConnectionInfoResponse `xml:"QueryHostConnectionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9385,7 +10245,7 @@ func QueryHostConnectionInfo(ctx context.Context, r soap.RoundTripper, req *type type QueryHostPatch_TaskBody struct { Req *types.QueryHostPatch_Task 
`xml:"urn:vim25 QueryHostPatch_Task,omitempty"` - Res *types.QueryHostPatch_TaskResponse `xml:"urn:vim25 QueryHostPatch_TaskResponse,omitempty"` + Res *types.QueryHostPatch_TaskResponse `xml:"QueryHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9405,7 +10265,7 @@ func QueryHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryHostProfileMetadataBody struct { Req *types.QueryHostProfileMetadata `xml:"urn:vim25 QueryHostProfileMetadata,omitempty"` - Res *types.QueryHostProfileMetadataResponse `xml:"urn:vim25 QueryHostProfileMetadataResponse,omitempty"` + Res *types.QueryHostProfileMetadataResponse `xml:"QueryHostProfileMetadataResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9425,7 +10285,7 @@ func QueryHostProfileMetadata(ctx context.Context, r soap.RoundTripper, req *typ type QueryHostStatusBody struct { Req *types.QueryHostStatus `xml:"urn:vim25 QueryHostStatus,omitempty"` - Res *types.QueryHostStatusResponse `xml:"urn:vim25 QueryHostStatusResponse,omitempty"` + Res *types.QueryHostStatusResponse `xml:"QueryHostStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9443,9 +10303,29 @@ func QueryHostStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryH return resBody.Res, nil } +type QueryHostsWithAttachedLunBody struct { + Req *types.QueryHostsWithAttachedLun `xml:"urn:vim25 QueryHostsWithAttachedLun,omitempty"` + Res *types.QueryHostsWithAttachedLunResponse `xml:"QueryHostsWithAttachedLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostsWithAttachedLunBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostsWithAttachedLun(ctx context.Context, r soap.RoundTripper, req *types.QueryHostsWithAttachedLun) (*types.QueryHostsWithAttachedLunResponse, error) { + var reqBody, resBody QueryHostsWithAttachedLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryIORMConfigOptionBody struct { Req *types.QueryIORMConfigOption `xml:"urn:vim25 QueryIORMConfigOption,omitempty"` - Res *types.QueryIORMConfigOptionResponse `xml:"urn:vim25 QueryIORMConfigOptionResponse,omitempty"` + Res *types.QueryIORMConfigOptionResponse `xml:"QueryIORMConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9465,7 +10345,7 @@ func QueryIORMConfigOption(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryIPAllocationsBody struct { Req *types.QueryIPAllocations `xml:"urn:vim25 QueryIPAllocations,omitempty"` - Res *types.QueryIPAllocationsResponse `xml:"urn:vim25 QueryIPAllocationsResponse,omitempty"` + Res *types.QueryIPAllocationsResponse `xml:"QueryIPAllocationsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9485,7 +10365,7 @@ func QueryIPAllocations(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryIoFilterInfoBody struct { Req *types.QueryIoFilterInfo `xml:"urn:vim25 QueryIoFilterInfo,omitempty"` - Res *types.QueryIoFilterInfoResponse `xml:"urn:vim25 QueryIoFilterInfoResponse,omitempty"` + Res *types.QueryIoFilterInfoResponse `xml:"QueryIoFilterInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9505,7 +10385,7 @@ func QueryIoFilterInfo(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryIoFilterIssuesBody struct { Req *types.QueryIoFilterIssues `xml:"urn:vim25 QueryIoFilterIssues,omitempty"` - Res *types.QueryIoFilterIssuesResponse `xml:"urn:vim25 QueryIoFilterIssuesResponse,omitempty"` + Res *types.QueryIoFilterIssuesResponse `xml:"QueryIoFilterIssuesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9525,7 +10405,7 @@ func QueryIoFilterIssues(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryIpPoolsBody struct { Req *types.QueryIpPools `xml:"urn:vim25 QueryIpPools,omitempty"` - Res *types.QueryIpPoolsResponse `xml:"urn:vim25 QueryIpPoolsResponse,omitempty"` + Res *types.QueryIpPoolsResponse `xml:"QueryIpPoolsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9545,7 +10425,7 @@ func QueryIpPools(ctx context.Context, r soap.RoundTripper, req *types.QueryIpPo type QueryLicenseSourceAvailabilityBody struct { Req *types.QueryLicenseSourceAvailability `xml:"urn:vim25 QueryLicenseSourceAvailability,omitempty"` - Res *types.QueryLicenseSourceAvailabilityResponse `xml:"urn:vim25 QueryLicenseSourceAvailabilityResponse,omitempty"` + Res *types.QueryLicenseSourceAvailabilityResponse `xml:"QueryLicenseSourceAvailabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9565,7 +10445,7 @@ func QueryLicenseSourceAvailability(ctx context.Context, r soap.RoundTripper, re type QueryLicenseUsageBody struct { Req *types.QueryLicenseUsage `xml:"urn:vim25 QueryLicenseUsage,omitempty"` - Res *types.QueryLicenseUsageResponse `xml:"urn:vim25 QueryLicenseUsageResponse,omitempty"` + Res *types.QueryLicenseUsageResponse `xml:"QueryLicenseUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9585,7 +10465,7 @@ func QueryLicenseUsage(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryLockdownExceptionsBody struct { Req *types.QueryLockdownExceptions `xml:"urn:vim25 QueryLockdownExceptions,omitempty"` - Res *types.QueryLockdownExceptionsResponse `xml:"urn:vim25 QueryLockdownExceptionsResponse,omitempty"` + Res *types.QueryLockdownExceptionsResponse `xml:"QueryLockdownExceptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9605,7 +10485,7 @@ func QueryLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *type type QueryManagedByBody struct { Req *types.QueryManagedBy `xml:"urn:vim25 
QueryManagedBy,omitempty"` - Res *types.QueryManagedByResponse `xml:"urn:vim25 QueryManagedByResponse,omitempty"` + Res *types.QueryManagedByResponse `xml:"QueryManagedByResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9623,9 +10503,29 @@ func QueryManagedBy(ctx context.Context, r soap.RoundTripper, req *types.QueryMa return resBody.Res, nil } +type QueryMaxQueueDepthBody struct { + Req *types.QueryMaxQueueDepth `xml:"urn:vim25 QueryMaxQueueDepth,omitempty"` + Res *types.QueryMaxQueueDepthResponse `xml:"QueryMaxQueueDepthResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryMaxQueueDepthBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryMaxQueueDepth(ctx context.Context, r soap.RoundTripper, req *types.QueryMaxQueueDepth) (*types.QueryMaxQueueDepthResponse, error) { + var reqBody, resBody QueryMaxQueueDepthBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryMemoryOverheadBody struct { Req *types.QueryMemoryOverhead `xml:"urn:vim25 QueryMemoryOverhead,omitempty"` - Res *types.QueryMemoryOverheadResponse `xml:"urn:vim25 QueryMemoryOverheadResponse,omitempty"` + Res *types.QueryMemoryOverheadResponse `xml:"QueryMemoryOverheadResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9645,7 +10545,7 @@ func QueryMemoryOverhead(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryMemoryOverheadExBody struct { Req *types.QueryMemoryOverheadEx `xml:"urn:vim25 QueryMemoryOverheadEx,omitempty"` - Res *types.QueryMemoryOverheadExResponse `xml:"urn:vim25 QueryMemoryOverheadExResponse,omitempty"` + Res *types.QueryMemoryOverheadExResponse `xml:"QueryMemoryOverheadExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9665,7 +10565,7 @@ func QueryMemoryOverheadEx(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryMigrationDependenciesBody struct { Req *types.QueryMigrationDependencies `xml:"urn:vim25 QueryMigrationDependencies,omitempty"` - Res *types.QueryMigrationDependenciesResponse `xml:"urn:vim25 QueryMigrationDependenciesResponse,omitempty"` + Res *types.QueryMigrationDependenciesResponse `xml:"QueryMigrationDependenciesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9685,7 +10585,7 @@ func QueryMigrationDependencies(ctx context.Context, r soap.RoundTripper, req *t type QueryModulesBody struct { Req *types.QueryModules `xml:"urn:vim25 QueryModules,omitempty"` - Res *types.QueryModulesResponse `xml:"urn:vim25 QueryModulesResponse,omitempty"` + Res *types.QueryModulesResponse `xml:"QueryModulesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9705,7 +10605,7 @@ func QueryModules(ctx context.Context, r soap.RoundTripper, req *types.QueryModu type QueryMonitoredEntitiesBody struct { Req *types.QueryMonitoredEntities `xml:"urn:vim25 QueryMonitoredEntities,omitempty"` - Res *types.QueryMonitoredEntitiesResponse `xml:"urn:vim25 QueryMonitoredEntitiesResponse,omitempty"` + Res *types.QueryMonitoredEntitiesResponse `xml:"QueryMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9725,7 +10625,7 @@ func QueryMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types type QueryNFSUserBody struct { Req *types.QueryNFSUser `xml:"urn:vim25 QueryNFSUser,omitempty"` - Res *types.QueryNFSUserResponse `xml:"urn:vim25 QueryNFSUserResponse,omitempty"` + Res *types.QueryNFSUserResponse `xml:"QueryNFSUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9745,7 +10645,7 @@ func QueryNFSUser(ctx context.Context, r soap.RoundTripper, req *types.QueryNFSU type QueryNetConfigBody struct { Req *types.QueryNetConfig `xml:"urn:vim25 QueryNetConfig,omitempty"` - Res *types.QueryNetConfigResponse `xml:"urn:vim25 QueryNetConfigResponse,omitempty"` + Res *types.QueryNetConfigResponse `xml:"QueryNetConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9765,7 +10665,7 @@ func QueryNetConfig(ctx context.Context, r soap.RoundTripper, req *types.QueryNe type QueryNetworkHintBody struct { Req *types.QueryNetworkHint `xml:"urn:vim25 QueryNetworkHint,omitempty"` - Res *types.QueryNetworkHintResponse `xml:"urn:vim25 QueryNetworkHintResponse,omitempty"` + Res *types.QueryNetworkHintResponse `xml:"QueryNetworkHintResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9785,7 +10685,7 @@ func QueryNetworkHint(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryObjectsOnPhysicalVsanDiskBody struct { Req *types.QueryObjectsOnPhysicalVsanDisk `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDisk,omitempty"` - Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDiskResponse,omitempty"` + Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"QueryObjectsOnPhysicalVsanDiskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9805,7 +10705,7 @@ func QueryObjectsOnPhysicalVsanDisk(ctx context.Context, r soap.RoundTripper, re type QueryOptionsBody struct { Req *types.QueryOptions `xml:"urn:vim25 QueryOptions,omitempty"` - Res 
*types.QueryOptionsResponse `xml:"urn:vim25 QueryOptionsResponse,omitempty"` + Res *types.QueryOptionsResponse `xml:"QueryOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9825,7 +10725,7 @@ func QueryOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryOpti type QueryPartitionCreateDescBody struct { Req *types.QueryPartitionCreateDesc `xml:"urn:vim25 QueryPartitionCreateDesc,omitempty"` - Res *types.QueryPartitionCreateDescResponse `xml:"urn:vim25 QueryPartitionCreateDescResponse,omitempty"` + Res *types.QueryPartitionCreateDescResponse `xml:"QueryPartitionCreateDescResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9845,7 +10745,7 @@ func QueryPartitionCreateDesc(ctx context.Context, r soap.RoundTripper, req *typ type QueryPartitionCreateOptionsBody struct { Req *types.QueryPartitionCreateOptions `xml:"urn:vim25 QueryPartitionCreateOptions,omitempty"` - Res *types.QueryPartitionCreateOptionsResponse `xml:"urn:vim25 QueryPartitionCreateOptionsResponse,omitempty"` + Res *types.QueryPartitionCreateOptionsResponse `xml:"QueryPartitionCreateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9865,7 +10765,7 @@ func QueryPartitionCreateOptions(ctx context.Context, r soap.RoundTripper, req * type QueryPathSelectionPolicyOptionsBody struct { Req *types.QueryPathSelectionPolicyOptions `xml:"urn:vim25 QueryPathSelectionPolicyOptions,omitempty"` - Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"urn:vim25 QueryPathSelectionPolicyOptionsResponse,omitempty"` + Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"QueryPathSelectionPolicyOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9885,7 +10785,7 @@ func QueryPathSelectionPolicyOptions(ctx context.Context, r soap.RoundTripper, r type QueryPerfBody struct { Req *types.QueryPerf `xml:"urn:vim25 QueryPerf,omitempty"` - Res *types.QueryPerfResponse `xml:"urn:vim25 QueryPerfResponse,omitempty"` + Res *types.QueryPerfResponse `xml:"QueryPerfResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9905,7 +10805,7 @@ func QueryPerf(ctx context.Context, r soap.RoundTripper, req *types.QueryPerf) ( type QueryPerfCompositeBody struct { Req *types.QueryPerfComposite `xml:"urn:vim25 QueryPerfComposite,omitempty"` - Res *types.QueryPerfCompositeResponse `xml:"urn:vim25 QueryPerfCompositeResponse,omitempty"` + Res *types.QueryPerfCompositeResponse `xml:"QueryPerfCompositeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9925,7 +10825,7 @@ func QueryPerfComposite(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryPerfCounterBody struct { Req *types.QueryPerfCounter `xml:"urn:vim25 QueryPerfCounter,omitempty"` - Res *types.QueryPerfCounterResponse `xml:"urn:vim25 QueryPerfCounterResponse,omitempty"` + Res *types.QueryPerfCounterResponse `xml:"QueryPerfCounterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9945,7 +10845,7 @@ func QueryPerfCounter(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryPerfCounterByLevelBody struct { Req *types.QueryPerfCounterByLevel `xml:"urn:vim25 QueryPerfCounterByLevel,omitempty"` - Res *types.QueryPerfCounterByLevelResponse 
`xml:"urn:vim25 QueryPerfCounterByLevelResponse,omitempty"` + Res *types.QueryPerfCounterByLevelResponse `xml:"QueryPerfCounterByLevelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9965,7 +10865,7 @@ func QueryPerfCounterByLevel(ctx context.Context, r soap.RoundTripper, req *type type QueryPerfProviderSummaryBody struct { Req *types.QueryPerfProviderSummary `xml:"urn:vim25 QueryPerfProviderSummary,omitempty"` - Res *types.QueryPerfProviderSummaryResponse `xml:"urn:vim25 QueryPerfProviderSummaryResponse,omitempty"` + Res *types.QueryPerfProviderSummaryResponse `xml:"QueryPerfProviderSummaryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9985,7 +10885,7 @@ func QueryPerfProviderSummary(ctx context.Context, r soap.RoundTripper, req *typ type QueryPhysicalVsanDisksBody struct { Req *types.QueryPhysicalVsanDisks `xml:"urn:vim25 QueryPhysicalVsanDisks,omitempty"` - Res *types.QueryPhysicalVsanDisksResponse `xml:"urn:vim25 QueryPhysicalVsanDisksResponse,omitempty"` + Res *types.QueryPhysicalVsanDisksResponse `xml:"QueryPhysicalVsanDisksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10005,7 +10905,7 @@ func QueryPhysicalVsanDisks(ctx context.Context, r soap.RoundTripper, req *types type QueryPnicStatusBody struct { Req *types.QueryPnicStatus `xml:"urn:vim25 QueryPnicStatus,omitempty"` - Res *types.QueryPnicStatusResponse `xml:"urn:vim25 QueryPnicStatusResponse,omitempty"` + Res *types.QueryPnicStatusResponse `xml:"QueryPnicStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10025,7 +10925,7 @@ func QueryPnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryP type QueryPolicyMetadataBody struct { Req *types.QueryPolicyMetadata `xml:"urn:vim25 QueryPolicyMetadata,omitempty"` - Res *types.QueryPolicyMetadataResponse `xml:"urn:vim25 QueryPolicyMetadataResponse,omitempty"` + Res *types.QueryPolicyMetadataResponse `xml:"QueryPolicyMetadataResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10043,9 +10943,29 @@ func QueryPolicyMetadata(ctx context.Context, r soap.RoundTripper, req *types.Qu return resBody.Res, nil } +type QueryProductLockerLocationBody struct { + Req *types.QueryProductLockerLocation `xml:"urn:vim25 QueryProductLockerLocation,omitempty"` + Res *types.QueryProductLockerLocationResponse `xml:"QueryProductLockerLocationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryProductLockerLocationBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryProductLockerLocation(ctx context.Context, r soap.RoundTripper, req *types.QueryProductLockerLocation) (*types.QueryProductLockerLocationResponse, error) { + var reqBody, resBody QueryProductLockerLocationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryProfileStructureBody struct { Req *types.QueryProfileStructure `xml:"urn:vim25 QueryProfileStructure,omitempty"` - Res *types.QueryProfileStructureResponse `xml:"urn:vim25 QueryProfileStructureResponse,omitempty"` + Res *types.QueryProfileStructureResponse `xml:"QueryProfileStructureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } 
@@ -10065,7 +10985,7 @@ func QueryProfileStructure(ctx context.Context, r soap.RoundTripper, req *types. type QueryProviderListBody struct { Req *types.QueryProviderList `xml:"urn:vim25 QueryProviderList,omitempty"` - Res *types.QueryProviderListResponse `xml:"urn:vim25 QueryProviderListResponse,omitempty"` + Res *types.QueryProviderListResponse `xml:"QueryProviderListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10085,7 +11005,7 @@ func QueryProviderList(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryProviderNameBody struct { Req *types.QueryProviderName `xml:"urn:vim25 QueryProviderName,omitempty"` - Res *types.QueryProviderNameResponse `xml:"urn:vim25 QueryProviderNameResponse,omitempty"` + Res *types.QueryProviderNameResponse `xml:"QueryProviderNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10105,7 +11025,7 @@ func QueryProviderName(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryResourceConfigOptionBody struct { Req *types.QueryResourceConfigOption `xml:"urn:vim25 QueryResourceConfigOption,omitempty"` - Res *types.QueryResourceConfigOptionResponse `xml:"urn:vim25 QueryResourceConfigOptionResponse,omitempty"` + Res *types.QueryResourceConfigOptionResponse `xml:"QueryResourceConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10125,7 +11045,7 @@ func QueryResourceConfigOption(ctx context.Context, r soap.RoundTripper, req *ty type QueryServiceListBody struct { Req *types.QueryServiceList `xml:"urn:vim25 QueryServiceList,omitempty"` - Res *types.QueryServiceListResponse `xml:"urn:vim25 QueryServiceListResponse,omitempty"` + Res *types.QueryServiceListResponse `xml:"QueryServiceListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10145,7 +11065,7 @@ func QueryServiceList(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryStorageArrayTypePolicyOptionsBody struct { Req *types.QueryStorageArrayTypePolicyOptions `xml:"urn:vim25 QueryStorageArrayTypePolicyOptions,omitempty"` - Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"urn:vim25 QueryStorageArrayTypePolicyOptionsResponse,omitempty"` + Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"QueryStorageArrayTypePolicyOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10165,7 +11085,7 @@ func QueryStorageArrayTypePolicyOptions(ctx context.Context, r soap.RoundTripper type QuerySupportedFeaturesBody struct { Req *types.QuerySupportedFeatures `xml:"urn:vim25 QuerySupportedFeatures,omitempty"` - Res *types.QuerySupportedFeaturesResponse `xml:"urn:vim25 QuerySupportedFeaturesResponse,omitempty"` + Res *types.QuerySupportedFeaturesResponse `xml:"QuerySupportedFeaturesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10183,9 +11103,29 @@ func QuerySupportedFeatures(ctx context.Context, r soap.RoundTripper, req *types return resBody.Res, nil } +type QuerySupportedNetworkOffloadSpecBody struct { + Req *types.QuerySupportedNetworkOffloadSpec `xml:"urn:vim25 QuerySupportedNetworkOffloadSpec,omitempty"` + Res *types.QuerySupportedNetworkOffloadSpecResponse `xml:"QuerySupportedNetworkOffloadSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` +} + +func (b *QuerySupportedNetworkOffloadSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func QuerySupportedNetworkOffloadSpec(ctx context.Context, r soap.RoundTripper, req *types.QuerySupportedNetworkOffloadSpec) (*types.QuerySupportedNetworkOffloadSpecResponse, error) { + var reqBody, resBody QuerySupportedNetworkOffloadSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QuerySyncingVsanObjectsBody struct { Req *types.QuerySyncingVsanObjects `xml:"urn:vim25 QuerySyncingVsanObjects,omitempty"` - Res *types.QuerySyncingVsanObjectsResponse `xml:"urn:vim25 QuerySyncingVsanObjectsResponse,omitempty"` + Res *types.QuerySyncingVsanObjectsResponse `xml:"QuerySyncingVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10205,7 +11145,7 @@ func QuerySyncingVsanObjects(ctx context.Context, r soap.RoundTripper, req *type type QuerySystemUsersBody struct { Req *types.QuerySystemUsers `xml:"urn:vim25 QuerySystemUsers,omitempty"` - Res *types.QuerySystemUsersResponse `xml:"urn:vim25 QuerySystemUsersResponse,omitempty"` + Res *types.QuerySystemUsersResponse `xml:"QuerySystemUsersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10225,7 +11165,7 @@ func QuerySystemUsers(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryTargetCapabilitiesBody struct { Req *types.QueryTargetCapabilities `xml:"urn:vim25 QueryTargetCapabilities,omitempty"` - Res *types.QueryTargetCapabilitiesResponse `xml:"urn:vim25 QueryTargetCapabilitiesResponse,omitempty"` + Res *types.QueryTargetCapabilitiesResponse `xml:"QueryTargetCapabilitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10245,7 +11185,7 @@ func QueryTargetCapabilities(ctx context.Context, r soap.RoundTripper, req *type type QueryTpmAttestationReportBody struct { Req *types.QueryTpmAttestationReport `xml:"urn:vim25 QueryTpmAttestationReport,omitempty"` - Res *types.QueryTpmAttestationReportResponse `xml:"urn:vim25 QueryTpmAttestationReportResponse,omitempty"` + Res *types.QueryTpmAttestationReportResponse `xml:"QueryTpmAttestationReportResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10265,7 +11205,7 @@ func QueryTpmAttestationReport(ctx context.Context, r soap.RoundTripper, req *ty type QueryUnmonitoredHostsBody struct { Req *types.QueryUnmonitoredHosts `xml:"urn:vim25 QueryUnmonitoredHosts,omitempty"` - Res *types.QueryUnmonitoredHostsResponse `xml:"urn:vim25 QueryUnmonitoredHostsResponse,omitempty"` + Res *types.QueryUnmonitoredHostsResponse `xml:"QueryUnmonitoredHostsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10285,7 +11225,7 @@ func QueryUnmonitoredHosts(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryUnownedFilesBody struct { Req *types.QueryUnownedFiles `xml:"urn:vim25 QueryUnownedFiles,omitempty"` - Res *types.QueryUnownedFilesResponse `xml:"urn:vim25 QueryUnownedFilesResponse,omitempty"` + Res *types.QueryUnownedFilesResponse `xml:"QueryUnownedFilesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10305,7 +11245,7 @@ func QueryUnownedFiles(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryUnresolvedVmfsVolumeBody struct { Req *types.QueryUnresolvedVmfsVolume `xml:"urn:vim25 QueryUnresolvedVmfsVolume,omitempty"` - Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumeResponse,omitempty"` + Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"QueryUnresolvedVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10325,7 +11265,7 @@ func QueryUnresolvedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *ty type QueryUnresolvedVmfsVolumesBody struct { Req *types.QueryUnresolvedVmfsVolumes `xml:"urn:vim25 QueryUnresolvedVmfsVolumes,omitempty"` - Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumesResponse,omitempty"` + Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"QueryUnresolvedVmfsVolumesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10345,7 +11285,7 @@ func QueryUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *t type QueryUsedVlanIdInDvsBody struct { Req *types.QueryUsedVlanIdInDvs `xml:"urn:vim25 QueryUsedVlanIdInDvs,omitempty"` - Res *types.QueryUsedVlanIdInDvsResponse `xml:"urn:vim25 QueryUsedVlanIdInDvsResponse,omitempty"` + Res *types.QueryUsedVlanIdInDvsResponse `xml:"QueryUsedVlanIdInDvsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10365,7 +11305,7 @@ func QueryUsedVlanIdInDvs(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryVMotionCompatibilityBody struct { Req *types.QueryVMotionCompatibility `xml:"urn:vim25 QueryVMotionCompatibility,omitempty"` - Res *types.QueryVMotionCompatibilityResponse `xml:"urn:vim25 QueryVMotionCompatibilityResponse,omitempty"` + Res *types.QueryVMotionCompatibilityResponse `xml:"QueryVMotionCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10385,7 +11325,7 @@ func QueryVMotionCompatibility(ctx context.Context, r soap.RoundTripper, req *ty type QueryVMotionCompatibilityEx_TaskBody struct { Req *types.QueryVMotionCompatibilityEx_Task `xml:"urn:vim25 QueryVMotionCompatibilityEx_Task,omitempty"` - Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"urn:vim25 QueryVMotionCompatibilityEx_TaskResponse,omitempty"` + Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"QueryVMotionCompatibilityEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10405,7 +11345,7 @@ func QueryVMotionCompatibilityEx_Task(ctx context.Context, r soap.RoundTripper, type QueryVirtualDiskFragmentationBody struct { Req *types.QueryVirtualDiskFragmentation `xml:"urn:vim25 QueryVirtualDiskFragmentation,omitempty"` - Res *types.QueryVirtualDiskFragmentationResponse `xml:"urn:vim25 QueryVirtualDiskFragmentationResponse,omitempty"` + Res *types.QueryVirtualDiskFragmentationResponse `xml:"QueryVirtualDiskFragmentationResponse,omitempty"` Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10425,7 +11365,7 @@ func QueryVirtualDiskFragmentation(ctx context.Context, r soap.RoundTripper, req type QueryVirtualDiskGeometryBody struct { Req *types.QueryVirtualDiskGeometry `xml:"urn:vim25 QueryVirtualDiskGeometry,omitempty"` - Res *types.QueryVirtualDiskGeometryResponse `xml:"urn:vim25 QueryVirtualDiskGeometryResponse,omitempty"` + Res *types.QueryVirtualDiskGeometryResponse `xml:"QueryVirtualDiskGeometryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10445,7 +11385,7 @@ func QueryVirtualDiskGeometry(ctx context.Context, r soap.RoundTripper, req *typ type QueryVirtualDiskUuidBody struct { Req *types.QueryVirtualDiskUuid `xml:"urn:vim25 QueryVirtualDiskUuid,omitempty"` - Res *types.QueryVirtualDiskUuidResponse `xml:"urn:vim25 QueryVirtualDiskUuidResponse,omitempty"` + Res *types.QueryVirtualDiskUuidResponse `xml:"QueryVirtualDiskUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10465,7 +11405,7 @@ func QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryVmfsConfigOptionBody struct { Req *types.QueryVmfsConfigOption `xml:"urn:vim25 QueryVmfsConfigOption,omitempty"` - Res *types.QueryVmfsConfigOptionResponse `xml:"urn:vim25 QueryVmfsConfigOptionResponse,omitempty"` + Res *types.QueryVmfsConfigOptionResponse `xml:"QueryVmfsConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10485,7 +11425,7 @@ func QueryVmfsConfigOption(ctx context.Context, r soap.RoundTripper, req *types. type QueryVmfsDatastoreCreateOptionsBody struct { Req *types.QueryVmfsDatastoreCreateOptions `xml:"urn:vim25 QueryVmfsDatastoreCreateOptions,omitempty"` - Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreCreateOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"QueryVmfsDatastoreCreateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10505,7 +11445,7 @@ func QueryVmfsDatastoreCreateOptions(ctx context.Context, r soap.RoundTripper, r type QueryVmfsDatastoreExpandOptionsBody struct { Req *types.QueryVmfsDatastoreExpandOptions `xml:"urn:vim25 QueryVmfsDatastoreExpandOptions,omitempty"` - Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExpandOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"QueryVmfsDatastoreExpandOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10525,7 +11465,7 @@ func QueryVmfsDatastoreExpandOptions(ctx context.Context, r soap.RoundTripper, r type QueryVmfsDatastoreExtendOptionsBody struct { Req *types.QueryVmfsDatastoreExtendOptions `xml:"urn:vim25 QueryVmfsDatastoreExtendOptions,omitempty"` - Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExtendOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"QueryVmfsDatastoreExtendOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10545,7 +11485,7 @@ func QueryVmfsDatastoreExtendOptions(ctx context.Context, r soap.RoundTripper, r type QueryVnicStatusBody struct { Req *types.QueryVnicStatus `xml:"urn:vim25 
QueryVnicStatus,omitempty"` - Res *types.QueryVnicStatusResponse `xml:"urn:vim25 QueryVnicStatusResponse,omitempty"` + Res *types.QueryVnicStatusResponse `xml:"QueryVnicStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10565,7 +11505,7 @@ func QueryVnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryV type QueryVsanObjectUuidsByFilterBody struct { Req *types.QueryVsanObjectUuidsByFilter `xml:"urn:vim25 QueryVsanObjectUuidsByFilter,omitempty"` - Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"urn:vim25 QueryVsanObjectUuidsByFilterResponse,omitempty"` + Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"QueryVsanObjectUuidsByFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10585,7 +11525,7 @@ func QueryVsanObjectUuidsByFilter(ctx context.Context, r soap.RoundTripper, req type QueryVsanObjectsBody struct { Req *types.QueryVsanObjects `xml:"urn:vim25 QueryVsanObjects,omitempty"` - Res *types.QueryVsanObjectsResponse `xml:"urn:vim25 QueryVsanObjectsResponse,omitempty"` + Res *types.QueryVsanObjectsResponse `xml:"QueryVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10605,7 +11545,7 @@ func QueryVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryVsanStatisticsBody struct { Req *types.QueryVsanStatistics `xml:"urn:vim25 QueryVsanStatistics,omitempty"` - Res *types.QueryVsanStatisticsResponse `xml:"urn:vim25 QueryVsanStatisticsResponse,omitempty"` + Res *types.QueryVsanStatisticsResponse `xml:"QueryVsanStatisticsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10625,7 +11565,7 @@ func QueryVsanStatistics(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryVsanUpgradeStatusBody struct { Req *types.QueryVsanUpgradeStatus `xml:"urn:vim25 QueryVsanUpgradeStatus,omitempty"` - Res *types.QueryVsanUpgradeStatusResponse `xml:"urn:vim25 QueryVsanUpgradeStatusResponse,omitempty"` + Res *types.QueryVsanUpgradeStatusResponse `xml:"QueryVsanUpgradeStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10645,7 +11585,7 @@ func QueryVsanUpgradeStatus(ctx context.Context, r soap.RoundTripper, req *types type ReadEnvironmentVariableInGuestBody struct { Req *types.ReadEnvironmentVariableInGuest `xml:"urn:vim25 ReadEnvironmentVariableInGuest,omitempty"` - Res *types.ReadEnvironmentVariableInGuestResponse `xml:"urn:vim25 ReadEnvironmentVariableInGuestResponse,omitempty"` + Res *types.ReadEnvironmentVariableInGuestResponse `xml:"ReadEnvironmentVariableInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10665,7 +11605,7 @@ func ReadEnvironmentVariableInGuest(ctx context.Context, r soap.RoundTripper, re type ReadNextEventsBody struct { Req *types.ReadNextEvents `xml:"urn:vim25 ReadNextEvents,omitempty"` - Res *types.ReadNextEventsResponse `xml:"urn:vim25 ReadNextEventsResponse,omitempty"` + Res *types.ReadNextEventsResponse `xml:"ReadNextEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10685,7 +11625,7 @@ func ReadNextEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadNex type ReadNextTasksBody struct { Req *types.ReadNextTasks `xml:"urn:vim25 ReadNextTasks,omitempty"` - Res 
*types.ReadNextTasksResponse `xml:"urn:vim25 ReadNextTasksResponse,omitempty"` + Res *types.ReadNextTasksResponse `xml:"ReadNextTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10705,7 +11645,7 @@ func ReadNextTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadNext type ReadPreviousEventsBody struct { Req *types.ReadPreviousEvents `xml:"urn:vim25 ReadPreviousEvents,omitempty"` - Res *types.ReadPreviousEventsResponse `xml:"urn:vim25 ReadPreviousEventsResponse,omitempty"` + Res *types.ReadPreviousEventsResponse `xml:"ReadPreviousEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10725,7 +11665,7 @@ func ReadPreviousEvents(ctx context.Context, r soap.RoundTripper, req *types.Rea type ReadPreviousTasksBody struct { Req *types.ReadPreviousTasks `xml:"urn:vim25 ReadPreviousTasks,omitempty"` - Res *types.ReadPreviousTasksResponse `xml:"urn:vim25 ReadPreviousTasksResponse,omitempty"` + Res *types.ReadPreviousTasksResponse `xml:"ReadPreviousTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10745,7 +11685,7 @@ func ReadPreviousTasks(ctx context.Context, r soap.RoundTripper, req *types.Read type RebootGuestBody struct { Req *types.RebootGuest `xml:"urn:vim25 RebootGuest,omitempty"` - Res *types.RebootGuestResponse `xml:"urn:vim25 RebootGuestResponse,omitempty"` + Res *types.RebootGuestResponse `xml:"RebootGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10765,7 +11705,7 @@ func RebootGuest(ctx context.Context, r soap.RoundTripper, req *types.RebootGues type RebootHost_TaskBody struct { Req *types.RebootHost_Task `xml:"urn:vim25 RebootHost_Task,omitempty"` - Res *types.RebootHost_TaskResponse `xml:"urn:vim25 RebootHost_TaskResponse,omitempty"` + Res *types.RebootHost_TaskResponse `xml:"RebootHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10785,7 +11725,7 @@ func RebootHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Reboot type RecommendDatastoresBody struct { Req *types.RecommendDatastores `xml:"urn:vim25 RecommendDatastores,omitempty"` - Res *types.RecommendDatastoresResponse `xml:"urn:vim25 RecommendDatastoresResponse,omitempty"` + Res *types.RecommendDatastoresResponse `xml:"RecommendDatastoresResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10805,7 +11745,7 @@ func RecommendDatastores(ctx context.Context, r soap.RoundTripper, req *types.Re type RecommendHostsForVmBody struct { Req *types.RecommendHostsForVm `xml:"urn:vim25 RecommendHostsForVm,omitempty"` - Res *types.RecommendHostsForVmResponse `xml:"urn:vim25 RecommendHostsForVmResponse,omitempty"` + Res *types.RecommendHostsForVmResponse `xml:"RecommendHostsForVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10825,7 +11765,7 @@ func RecommendHostsForVm(ctx context.Context, r soap.RoundTripper, req *types.Re type RecommissionVsanNode_TaskBody struct { Req *types.RecommissionVsanNode_Task `xml:"urn:vim25 RecommissionVsanNode_Task,omitempty"` - Res *types.RecommissionVsanNode_TaskResponse `xml:"urn:vim25 RecommissionVsanNode_TaskResponse,omitempty"` + Res *types.RecommissionVsanNode_TaskResponse `xml:"RecommissionVsanNode_TaskResponse,omitempty"` Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10845,7 +11785,7 @@ func RecommissionVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *ty type ReconcileDatastoreInventory_TaskBody struct { Req *types.ReconcileDatastoreInventory_Task `xml:"urn:vim25 ReconcileDatastoreInventory_Task,omitempty"` - Res *types.ReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 ReconcileDatastoreInventory_TaskResponse,omitempty"` + Res *types.ReconcileDatastoreInventory_TaskResponse `xml:"ReconcileDatastoreInventory_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10865,7 +11805,7 @@ func ReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, type ReconfigVM_TaskBody struct { Req *types.ReconfigVM_Task `xml:"urn:vim25 ReconfigVM_Task,omitempty"` - Res *types.ReconfigVM_TaskResponse `xml:"urn:vim25 ReconfigVM_TaskResponse,omitempty"` + Res *types.ReconfigVM_TaskResponse `xml:"ReconfigVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10885,7 +11825,7 @@ func ReconfigVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Reconf type ReconfigurationSatisfiableBody struct { Req *types.ReconfigurationSatisfiable `xml:"urn:vim25 ReconfigurationSatisfiable,omitempty"` - Res *types.ReconfigurationSatisfiableResponse `xml:"urn:vim25 ReconfigurationSatisfiableResponse,omitempty"` + Res *types.ReconfigurationSatisfiableResponse `xml:"ReconfigurationSatisfiableResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10905,7 +11845,7 @@ func ReconfigurationSatisfiable(ctx context.Context, r soap.RoundTripper, req *t type ReconfigureAlarmBody struct { Req *types.ReconfigureAlarm `xml:"urn:vim25 ReconfigureAlarm,omitempty"` - Res *types.ReconfigureAlarmResponse `xml:"urn:vim25 ReconfigureAlarmResponse,omitempty"` + Res *types.ReconfigureAlarmResponse `xml:"ReconfigureAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10925,7 +11865,7 @@ func ReconfigureAlarm(ctx context.Context, r soap.RoundTripper, req *types.Recon type ReconfigureAutostartBody struct { Req *types.ReconfigureAutostart `xml:"urn:vim25 ReconfigureAutostart,omitempty"` - Res *types.ReconfigureAutostartResponse `xml:"urn:vim25 ReconfigureAutostartResponse,omitempty"` + Res *types.ReconfigureAutostartResponse `xml:"ReconfigureAutostartResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10945,7 +11885,7 @@ func ReconfigureAutostart(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureCluster_TaskBody struct { Req *types.ReconfigureCluster_Task `xml:"urn:vim25 ReconfigureCluster_Task,omitempty"` - Res *types.ReconfigureCluster_TaskResponse `xml:"urn:vim25 ReconfigureCluster_TaskResponse,omitempty"` + Res *types.ReconfigureCluster_TaskResponse `xml:"ReconfigureCluster_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10965,7 +11905,7 @@ func ReconfigureCluster_Task(ctx context.Context, r soap.RoundTripper, req *type type ReconfigureComputeResource_TaskBody struct { Req *types.ReconfigureComputeResource_Task `xml:"urn:vim25 ReconfigureComputeResource_Task,omitempty"` - Res *types.ReconfigureComputeResource_TaskResponse `xml:"urn:vim25 ReconfigureComputeResource_TaskResponse,omitempty"` + Res 
*types.ReconfigureComputeResource_TaskResponse `xml:"ReconfigureComputeResource_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10985,7 +11925,7 @@ func ReconfigureComputeResource_Task(ctx context.Context, r soap.RoundTripper, r type ReconfigureDVPort_TaskBody struct { Req *types.ReconfigureDVPort_Task `xml:"urn:vim25 ReconfigureDVPort_Task,omitempty"` - Res *types.ReconfigureDVPort_TaskResponse `xml:"urn:vim25 ReconfigureDVPort_TaskResponse,omitempty"` + Res *types.ReconfigureDVPort_TaskResponse `xml:"ReconfigureDVPort_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11005,7 +11945,7 @@ func ReconfigureDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types type ReconfigureDVPortgroup_TaskBody struct { Req *types.ReconfigureDVPortgroup_Task `xml:"urn:vim25 ReconfigureDVPortgroup_Task,omitempty"` - Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"urn:vim25 ReconfigureDVPortgroup_TaskResponse,omitempty"` + Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"ReconfigureDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11025,7 +11965,7 @@ func ReconfigureDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req * type ReconfigureDatacenter_TaskBody struct { Req *types.ReconfigureDatacenter_Task `xml:"urn:vim25 ReconfigureDatacenter_Task,omitempty"` - Res *types.ReconfigureDatacenter_TaskResponse `xml:"urn:vim25 ReconfigureDatacenter_TaskResponse,omitempty"` + Res *types.ReconfigureDatacenter_TaskResponse `xml:"ReconfigureDatacenter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11045,7 +11985,7 @@ func ReconfigureDatacenter_Task(ctx context.Context, r soap.RoundTripper, req *t type ReconfigureDomObjectBody struct { Req *types.ReconfigureDomObject `xml:"urn:vim25 ReconfigureDomObject,omitempty"` - Res *types.ReconfigureDomObjectResponse `xml:"urn:vim25 ReconfigureDomObjectResponse,omitempty"` + Res *types.ReconfigureDomObjectResponse `xml:"ReconfigureDomObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11065,7 +12005,7 @@ func ReconfigureDomObject(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureDvs_TaskBody struct { Req *types.ReconfigureDvs_Task `xml:"urn:vim25 ReconfigureDvs_Task,omitempty"` - Res *types.ReconfigureDvs_TaskResponse `xml:"urn:vim25 ReconfigureDvs_TaskResponse,omitempty"` + Res *types.ReconfigureDvs_TaskResponse `xml:"ReconfigureDvs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11085,7 +12025,7 @@ func ReconfigureDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.Re type ReconfigureHostForDAS_TaskBody struct { Req *types.ReconfigureHostForDAS_Task `xml:"urn:vim25 ReconfigureHostForDAS_Task,omitempty"` - Res *types.ReconfigureHostForDAS_TaskResponse `xml:"urn:vim25 ReconfigureHostForDAS_TaskResponse,omitempty"` + Res *types.ReconfigureHostForDAS_TaskResponse `xml:"ReconfigureHostForDAS_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11105,7 +12045,7 @@ func ReconfigureHostForDAS_Task(ctx context.Context, r soap.RoundTripper, req *t type ReconfigureScheduledTaskBody struct { Req *types.ReconfigureScheduledTask `xml:"urn:vim25 
ReconfigureScheduledTask,omitempty"` - Res *types.ReconfigureScheduledTaskResponse `xml:"urn:vim25 ReconfigureScheduledTaskResponse,omitempty"` + Res *types.ReconfigureScheduledTaskResponse `xml:"ReconfigureScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11125,7 +12065,7 @@ func ReconfigureScheduledTask(ctx context.Context, r soap.RoundTripper, req *typ type ReconfigureServiceConsoleReservationBody struct { Req *types.ReconfigureServiceConsoleReservation `xml:"urn:vim25 ReconfigureServiceConsoleReservation,omitempty"` - Res *types.ReconfigureServiceConsoleReservationResponse `xml:"urn:vim25 ReconfigureServiceConsoleReservationResponse,omitempty"` + Res *types.ReconfigureServiceConsoleReservationResponse `xml:"ReconfigureServiceConsoleReservationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11145,7 +12085,7 @@ func ReconfigureServiceConsoleReservation(ctx context.Context, r soap.RoundTripp type ReconfigureSnmpAgentBody struct { Req *types.ReconfigureSnmpAgent `xml:"urn:vim25 ReconfigureSnmpAgent,omitempty"` - Res *types.ReconfigureSnmpAgentResponse `xml:"urn:vim25 ReconfigureSnmpAgentResponse,omitempty"` + Res *types.ReconfigureSnmpAgentResponse `xml:"ReconfigureSnmpAgentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11165,7 +12105,7 @@ func ReconfigureSnmpAgent(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureVirtualMachineReservationBody struct { Req *types.ReconfigureVirtualMachineReservation `xml:"urn:vim25 ReconfigureVirtualMachineReservation,omitempty"` - Res *types.ReconfigureVirtualMachineReservationResponse `xml:"urn:vim25 ReconfigureVirtualMachineReservationResponse,omitempty"` + Res *types.ReconfigureVirtualMachineReservationResponse `xml:"ReconfigureVirtualMachineReservationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11185,7 +12125,7 @@ func ReconfigureVirtualMachineReservation(ctx context.Context, r soap.RoundTripp type ReconnectHost_TaskBody struct { Req *types.ReconnectHost_Task `xml:"urn:vim25 ReconnectHost_Task,omitempty"` - Res *types.ReconnectHost_TaskResponse `xml:"urn:vim25 ReconnectHost_TaskResponse,omitempty"` + Res *types.ReconnectHost_TaskResponse `xml:"ReconnectHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11205,7 +12145,7 @@ func ReconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Rec type RectifyDvsHost_TaskBody struct { Req *types.RectifyDvsHost_Task `xml:"urn:vim25 RectifyDvsHost_Task,omitempty"` - Res *types.RectifyDvsHost_TaskResponse `xml:"urn:vim25 RectifyDvsHost_TaskResponse,omitempty"` + Res *types.RectifyDvsHost_TaskResponse `xml:"RectifyDvsHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11225,7 +12165,7 @@ func RectifyDvsHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Re type RectifyDvsOnHost_TaskBody struct { Req *types.RectifyDvsOnHost_Task `xml:"urn:vim25 RectifyDvsOnHost_Task,omitempty"` - Res *types.RectifyDvsOnHost_TaskResponse `xml:"urn:vim25 RectifyDvsOnHost_TaskResponse,omitempty"` + Res *types.RectifyDvsOnHost_TaskResponse `xml:"RectifyDvsOnHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11245,7 
+12185,7 @@ func RectifyDvsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types. type RefreshBody struct { Req *types.Refresh `xml:"urn:vim25 Refresh,omitempty"` - Res *types.RefreshResponse `xml:"urn:vim25 RefreshResponse,omitempty"` + Res *types.RefreshResponse `xml:"RefreshResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11265,7 +12205,7 @@ func Refresh(ctx context.Context, r soap.RoundTripper, req *types.Refresh) (*typ type RefreshDVPortStateBody struct { Req *types.RefreshDVPortState `xml:"urn:vim25 RefreshDVPortState,omitempty"` - Res *types.RefreshDVPortStateResponse `xml:"urn:vim25 RefreshDVPortStateResponse,omitempty"` + Res *types.RefreshDVPortStateResponse `xml:"RefreshDVPortStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11285,7 +12225,7 @@ func RefreshDVPortState(ctx context.Context, r soap.RoundTripper, req *types.Ref type RefreshDatastoreBody struct { Req *types.RefreshDatastore `xml:"urn:vim25 RefreshDatastore,omitempty"` - Res *types.RefreshDatastoreResponse `xml:"urn:vim25 RefreshDatastoreResponse,omitempty"` + Res *types.RefreshDatastoreResponse `xml:"RefreshDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11305,7 +12245,7 @@ func RefreshDatastore(ctx context.Context, r soap.RoundTripper, req *types.Refre type RefreshDatastoreStorageInfoBody struct { Req *types.RefreshDatastoreStorageInfo `xml:"urn:vim25 RefreshDatastoreStorageInfo,omitempty"` - Res *types.RefreshDatastoreStorageInfoResponse `xml:"urn:vim25 RefreshDatastoreStorageInfoResponse,omitempty"` + Res *types.RefreshDatastoreStorageInfoResponse `xml:"RefreshDatastoreStorageInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11325,7 +12265,7 @@ func RefreshDatastoreStorageInfo(ctx context.Context, r soap.RoundTripper, req * type RefreshDateTimeSystemBody struct { Req *types.RefreshDateTimeSystem `xml:"urn:vim25 RefreshDateTimeSystem,omitempty"` - Res *types.RefreshDateTimeSystemResponse `xml:"urn:vim25 RefreshDateTimeSystemResponse,omitempty"` + Res *types.RefreshDateTimeSystemResponse `xml:"RefreshDateTimeSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11345,7 +12285,7 @@ func RefreshDateTimeSystem(ctx context.Context, r soap.RoundTripper, req *types. 
type RefreshFirewallBody struct { Req *types.RefreshFirewall `xml:"urn:vim25 RefreshFirewall,omitempty"` - Res *types.RefreshFirewallResponse `xml:"urn:vim25 RefreshFirewallResponse,omitempty"` + Res *types.RefreshFirewallResponse `xml:"RefreshFirewallResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11365,7 +12305,7 @@ func RefreshFirewall(ctx context.Context, r soap.RoundTripper, req *types.Refres type RefreshGraphicsManagerBody struct { Req *types.RefreshGraphicsManager `xml:"urn:vim25 RefreshGraphicsManager,omitempty"` - Res *types.RefreshGraphicsManagerResponse `xml:"urn:vim25 RefreshGraphicsManagerResponse,omitempty"` + Res *types.RefreshGraphicsManagerResponse `xml:"RefreshGraphicsManagerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11385,7 +12325,7 @@ func RefreshGraphicsManager(ctx context.Context, r soap.RoundTripper, req *types type RefreshHealthStatusSystemBody struct { Req *types.RefreshHealthStatusSystem `xml:"urn:vim25 RefreshHealthStatusSystem,omitempty"` - Res *types.RefreshHealthStatusSystemResponse `xml:"urn:vim25 RefreshHealthStatusSystemResponse,omitempty"` + Res *types.RefreshHealthStatusSystemResponse `xml:"RefreshHealthStatusSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11405,7 +12345,7 @@ func RefreshHealthStatusSystem(ctx context.Context, r soap.RoundTripper, req *ty type RefreshNetworkSystemBody struct { Req *types.RefreshNetworkSystem `xml:"urn:vim25 RefreshNetworkSystem,omitempty"` - Res *types.RefreshNetworkSystemResponse `xml:"urn:vim25 RefreshNetworkSystemResponse,omitempty"` + Res *types.RefreshNetworkSystemResponse `xml:"RefreshNetworkSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11425,7 +12365,7 @@ func RefreshNetworkSystem(ctx context.Context, r soap.RoundTripper, req *types.R type RefreshRecommendationBody struct { Req *types.RefreshRecommendation `xml:"urn:vim25 RefreshRecommendation,omitempty"` - Res *types.RefreshRecommendationResponse `xml:"urn:vim25 RefreshRecommendationResponse,omitempty"` + Res *types.RefreshRecommendationResponse `xml:"RefreshRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11445,7 +12385,7 @@ func RefreshRecommendation(ctx context.Context, r soap.RoundTripper, req *types. 
type RefreshRuntimeBody struct { Req *types.RefreshRuntime `xml:"urn:vim25 RefreshRuntime,omitempty"` - Res *types.RefreshRuntimeResponse `xml:"urn:vim25 RefreshRuntimeResponse,omitempty"` + Res *types.RefreshRuntimeResponse `xml:"RefreshRuntimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11465,7 +12405,7 @@ func RefreshRuntime(ctx context.Context, r soap.RoundTripper, req *types.Refresh type RefreshServicesBody struct { Req *types.RefreshServices `xml:"urn:vim25 RefreshServices,omitempty"` - Res *types.RefreshServicesResponse `xml:"urn:vim25 RefreshServicesResponse,omitempty"` + Res *types.RefreshServicesResponse `xml:"RefreshServicesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11485,7 +12425,7 @@ func RefreshServices(ctx context.Context, r soap.RoundTripper, req *types.Refres type RefreshStorageDrsRecommendationBody struct { Req *types.RefreshStorageDrsRecommendation `xml:"urn:vim25 RefreshStorageDrsRecommendation,omitempty"` - Res *types.RefreshStorageDrsRecommendationResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationResponse,omitempty"` + Res *types.RefreshStorageDrsRecommendationResponse `xml:"RefreshStorageDrsRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11505,7 +12445,7 @@ func RefreshStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, r type RefreshStorageDrsRecommendationsForPod_TaskBody struct { Req *types.RefreshStorageDrsRecommendationsForPod_Task `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_Task,omitempty"` - Res *types.RefreshStorageDrsRecommendationsForPod_TaskResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_TaskResponse,omitempty"` + Res *types.RefreshStorageDrsRecommendationsForPod_TaskResponse `xml:"RefreshStorageDrsRecommendationsForPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11525,7 +12465,7 @@ func RefreshStorageDrsRecommendationsForPod_Task(ctx context.Context, r soap.Rou type RefreshStorageInfoBody struct { Req *types.RefreshStorageInfo `xml:"urn:vim25 RefreshStorageInfo,omitempty"` - Res *types.RefreshStorageInfoResponse `xml:"urn:vim25 RefreshStorageInfoResponse,omitempty"` + Res *types.RefreshStorageInfoResponse `xml:"RefreshStorageInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11545,7 +12485,7 @@ func RefreshStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.Ref type RefreshStorageSystemBody struct { Req *types.RefreshStorageSystem `xml:"urn:vim25 RefreshStorageSystem,omitempty"` - Res *types.RefreshStorageSystemResponse `xml:"urn:vim25 RefreshStorageSystemResponse,omitempty"` + Res *types.RefreshStorageSystemResponse `xml:"RefreshStorageSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11565,7 +12505,7 @@ func RefreshStorageSystem(ctx context.Context, r soap.RoundTripper, req *types.R type RegisterChildVM_TaskBody struct { Req *types.RegisterChildVM_Task `xml:"urn:vim25 RegisterChildVM_Task,omitempty"` - Res *types.RegisterChildVM_TaskResponse `xml:"urn:vim25 RegisterChildVM_TaskResponse,omitempty"` + Res *types.RegisterChildVM_TaskResponse `xml:"RegisterChildVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11585,7 
+12525,7 @@ func RegisterChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.R

 type RegisterDiskBody struct {
     Req    *types.RegisterDisk `xml:"urn:vim25 RegisterDisk,omitempty"`
-    Res    *types.RegisterDiskResponse `xml:"urn:vim25 RegisterDiskResponse,omitempty"`
+    Res    *types.RegisterDiskResponse `xml:"RegisterDiskResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -11605,7 +12545,7 @@ func RegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.RegisterD

 type RegisterExtensionBody struct {
     Req    *types.RegisterExtension `xml:"urn:vim25 RegisterExtension,omitempty"`
-    Res    *types.RegisterExtensionResponse `xml:"urn:vim25 RegisterExtensionResponse,omitempty"`
+    Res    *types.RegisterExtensionResponse `xml:"RegisterExtensionResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -11625,7 +12565,7 @@ func RegisterExtension(ctx context.Context, r soap.RoundTripper, req *types.Regi

 type RegisterHealthUpdateProviderBody struct {
     Req    *types.RegisterHealthUpdateProvider `xml:"urn:vim25 RegisterHealthUpdateProvider,omitempty"`
-    Res    *types.RegisterHealthUpdateProviderResponse `xml:"urn:vim25 RegisterHealthUpdateProviderResponse,omitempty"`
+    Res    *types.RegisterHealthUpdateProviderResponse `xml:"RegisterHealthUpdateProviderResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -11645,7 +12585,7 @@ func RegisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, req

 type RegisterKmipServerBody struct {
     Req    *types.RegisterKmipServer `xml:"urn:vim25 RegisterKmipServer,omitempty"`
-    Res    *types.RegisterKmipServerResponse `xml:"urn:vim25 RegisterKmipServerResponse,omitempty"`
+    Res    *types.RegisterKmipServerResponse `xml:"RegisterKmipServerResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -11663,9 +12603,29 @@ func RegisterKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Reg
     return resBody.Res, nil
 }

+type RegisterKmsClusterBody struct {
+    Req    *types.RegisterKmsCluster `xml:"urn:vim25 RegisterKmsCluster,omitempty"`
+    Res    *types.RegisterKmsClusterResponse `xml:"RegisterKmsClusterResponse,omitempty"`
+    Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RegisterKmsClusterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RegisterKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.RegisterKmsCluster) (*types.RegisterKmsClusterResponse, error) {
+    var reqBody, resBody RegisterKmsClusterBody
+
+    reqBody.Req = req
+
+    if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+        return nil, err
+    }
+
+    return resBody.Res, nil
+}
+
 type RegisterVM_TaskBody struct {
     Req    *types.RegisterVM_Task `xml:"urn:vim25 RegisterVM_Task,omitempty"`
-    Res    *types.RegisterVM_TaskResponse `xml:"urn:vim25 RegisterVM_TaskResponse,omitempty"`
+    Res    *types.RegisterVM_TaskResponse `xml:"RegisterVM_TaskResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -11685,7 +12645,7 @@ func RegisterVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Regist

 type ReleaseCredentialsInGuestBody struct {
     Req    *types.ReleaseCredentialsInGuest `xml:"urn:vim25 ReleaseCredentialsInGuest,omitempty"`
-    Res    *types.ReleaseCredentialsInGuestResponse `xml:"urn:vim25 ReleaseCredentialsInGuestResponse,omitempty"`
+    Res    *types.ReleaseCredentialsInGuestResponse
`xml:"ReleaseCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11705,7 +12665,7 @@ func ReleaseCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *ty type ReleaseIpAllocationBody struct { Req *types.ReleaseIpAllocation `xml:"urn:vim25 ReleaseIpAllocation,omitempty"` - Res *types.ReleaseIpAllocationResponse `xml:"urn:vim25 ReleaseIpAllocationResponse,omitempty"` + Res *types.ReleaseIpAllocationResponse `xml:"ReleaseIpAllocationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11725,7 +12685,7 @@ func ReleaseIpAllocation(ctx context.Context, r soap.RoundTripper, req *types.Re type ReleaseManagedSnapshotBody struct { Req *types.ReleaseManagedSnapshot `xml:"urn:vim25 ReleaseManagedSnapshot,omitempty"` - Res *types.ReleaseManagedSnapshotResponse `xml:"urn:vim25 ReleaseManagedSnapshotResponse,omitempty"` + Res *types.ReleaseManagedSnapshotResponse `xml:"ReleaseManagedSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11745,7 +12705,7 @@ func ReleaseManagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types type ReloadBody struct { Req *types.Reload `xml:"urn:vim25 Reload,omitempty"` - Res *types.ReloadResponse `xml:"urn:vim25 ReloadResponse,omitempty"` + Res *types.ReloadResponse `xml:"ReloadResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11765,7 +12725,7 @@ func Reload(ctx context.Context, r soap.RoundTripper, req *types.Reload) (*types type RelocateVM_TaskBody struct { Req *types.RelocateVM_Task `xml:"urn:vim25 RelocateVM_Task,omitempty"` - Res *types.RelocateVM_TaskResponse `xml:"urn:vim25 RelocateVM_TaskResponse,omitempty"` + Res *types.RelocateVM_TaskResponse `xml:"RelocateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11785,7 +12745,7 @@ func RelocateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Reloca type RelocateVStorageObject_TaskBody struct { Req *types.RelocateVStorageObject_Task `xml:"urn:vim25 RelocateVStorageObject_Task,omitempty"` - Res *types.RelocateVStorageObject_TaskResponse `xml:"urn:vim25 RelocateVStorageObject_TaskResponse,omitempty"` + Res *types.RelocateVStorageObject_TaskResponse `xml:"RelocateVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11805,7 +12765,7 @@ func RelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req * type RemoveAlarmBody struct { Req *types.RemoveAlarm `xml:"urn:vim25 RemoveAlarm,omitempty"` - Res *types.RemoveAlarmResponse `xml:"urn:vim25 RemoveAlarmResponse,omitempty"` + Res *types.RemoveAlarmResponse `xml:"RemoveAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11825,7 +12785,7 @@ func RemoveAlarm(ctx context.Context, r soap.RoundTripper, req *types.RemoveAlar type RemoveAllSnapshots_TaskBody struct { Req *types.RemoveAllSnapshots_Task `xml:"urn:vim25 RemoveAllSnapshots_Task,omitempty"` - Res *types.RemoveAllSnapshots_TaskResponse `xml:"urn:vim25 RemoveAllSnapshots_TaskResponse,omitempty"` + Res *types.RemoveAllSnapshots_TaskResponse `xml:"RemoveAllSnapshots_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11845,7 +12805,7 @@ func 
RemoveAllSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *type type RemoveAssignedLicenseBody struct { Req *types.RemoveAssignedLicense `xml:"urn:vim25 RemoveAssignedLicense,omitempty"` - Res *types.RemoveAssignedLicenseResponse `xml:"urn:vim25 RemoveAssignedLicenseResponse,omitempty"` + Res *types.RemoveAssignedLicenseResponse `xml:"RemoveAssignedLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11865,7 +12825,7 @@ func RemoveAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types. type RemoveAuthorizationRoleBody struct { Req *types.RemoveAuthorizationRole `xml:"urn:vim25 RemoveAuthorizationRole,omitempty"` - Res *types.RemoveAuthorizationRoleResponse `xml:"urn:vim25 RemoveAuthorizationRoleResponse,omitempty"` + Res *types.RemoveAuthorizationRoleResponse `xml:"RemoveAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11885,7 +12845,7 @@ func RemoveAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *type type RemoveCustomFieldDefBody struct { Req *types.RemoveCustomFieldDef `xml:"urn:vim25 RemoveCustomFieldDef,omitempty"` - Res *types.RemoveCustomFieldDefResponse `xml:"urn:vim25 RemoveCustomFieldDefResponse,omitempty"` + Res *types.RemoveCustomFieldDefResponse `xml:"RemoveCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11905,7 +12865,7 @@ func RemoveCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.R type RemoveDatastoreBody struct { Req *types.RemoveDatastore `xml:"urn:vim25 RemoveDatastore,omitempty"` - Res *types.RemoveDatastoreResponse `xml:"urn:vim25 RemoveDatastoreResponse,omitempty"` + Res *types.RemoveDatastoreResponse `xml:"RemoveDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11925,7 +12885,7 @@ func RemoveDatastore(ctx context.Context, r soap.RoundTripper, req *types.Remove type RemoveDatastoreEx_TaskBody struct { Req *types.RemoveDatastoreEx_Task `xml:"urn:vim25 RemoveDatastoreEx_Task,omitempty"` - Res *types.RemoveDatastoreEx_TaskResponse `xml:"urn:vim25 RemoveDatastoreEx_TaskResponse,omitempty"` + Res *types.RemoveDatastoreEx_TaskResponse `xml:"RemoveDatastoreEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11945,7 +12905,7 @@ func RemoveDatastoreEx_Task(ctx context.Context, r soap.RoundTripper, req *types type RemoveDiskMapping_TaskBody struct { Req *types.RemoveDiskMapping_Task `xml:"urn:vim25 RemoveDiskMapping_Task,omitempty"` - Res *types.RemoveDiskMapping_TaskResponse `xml:"urn:vim25 RemoveDiskMapping_TaskResponse,omitempty"` + Res *types.RemoveDiskMapping_TaskResponse `xml:"RemoveDiskMapping_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11965,7 +12925,7 @@ func RemoveDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types type RemoveDisk_TaskBody struct { Req *types.RemoveDisk_Task `xml:"urn:vim25 RemoveDisk_Task,omitempty"` - Res *types.RemoveDisk_TaskResponse `xml:"urn:vim25 RemoveDisk_TaskResponse,omitempty"` + Res *types.RemoveDisk_TaskResponse `xml:"RemoveDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11985,7 +12945,7 @@ func RemoveDisk_Task(ctx context.Context, r soap.RoundTripper, req 
*types.Remove type RemoveEntityPermissionBody struct { Req *types.RemoveEntityPermission `xml:"urn:vim25 RemoveEntityPermission,omitempty"` - Res *types.RemoveEntityPermissionResponse `xml:"urn:vim25 RemoveEntityPermissionResponse,omitempty"` + Res *types.RemoveEntityPermissionResponse `xml:"RemoveEntityPermissionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12005,7 +12965,7 @@ func RemoveEntityPermission(ctx context.Context, r soap.RoundTripper, req *types type RemoveFilterBody struct { Req *types.RemoveFilter `xml:"urn:vim25 RemoveFilter,omitempty"` - Res *types.RemoveFilterResponse `xml:"urn:vim25 RemoveFilterResponse,omitempty"` + Res *types.RemoveFilterResponse `xml:"RemoveFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12025,7 +12985,7 @@ func RemoveFilter(ctx context.Context, r soap.RoundTripper, req *types.RemoveFil type RemoveFilterEntitiesBody struct { Req *types.RemoveFilterEntities `xml:"urn:vim25 RemoveFilterEntities,omitempty"` - Res *types.RemoveFilterEntitiesResponse `xml:"urn:vim25 RemoveFilterEntitiesResponse,omitempty"` + Res *types.RemoveFilterEntitiesResponse `xml:"RemoveFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12045,7 +13005,7 @@ func RemoveFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.R type RemoveGroupBody struct { Req *types.RemoveGroup `xml:"urn:vim25 RemoveGroup,omitempty"` - Res *types.RemoveGroupResponse `xml:"urn:vim25 RemoveGroupResponse,omitempty"` + Res *types.RemoveGroupResponse `xml:"RemoveGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12065,7 +13025,7 @@ func RemoveGroup(ctx context.Context, r soap.RoundTripper, req *types.RemoveGrou type RemoveGuestAliasBody struct { Req *types.RemoveGuestAlias `xml:"urn:vim25 RemoveGuestAlias,omitempty"` - Res *types.RemoveGuestAliasResponse `xml:"urn:vim25 RemoveGuestAliasResponse,omitempty"` + Res *types.RemoveGuestAliasResponse `xml:"RemoveGuestAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12085,7 +13045,7 @@ func RemoveGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.Remov type RemoveGuestAliasByCertBody struct { Req *types.RemoveGuestAliasByCert `xml:"urn:vim25 RemoveGuestAliasByCert,omitempty"` - Res *types.RemoveGuestAliasByCertResponse `xml:"urn:vim25 RemoveGuestAliasByCertResponse,omitempty"` + Res *types.RemoveGuestAliasByCertResponse `xml:"RemoveGuestAliasByCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12105,7 +13065,7 @@ func RemoveGuestAliasByCert(ctx context.Context, r soap.RoundTripper, req *types type RemoveInternetScsiSendTargetsBody struct { Req *types.RemoveInternetScsiSendTargets `xml:"urn:vim25 RemoveInternetScsiSendTargets,omitempty"` - Res *types.RemoveInternetScsiSendTargetsResponse `xml:"urn:vim25 RemoveInternetScsiSendTargetsResponse,omitempty"` + Res *types.RemoveInternetScsiSendTargetsResponse `xml:"RemoveInternetScsiSendTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12125,7 +13085,7 @@ func RemoveInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req type RemoveInternetScsiStaticTargetsBody struct { Req *types.RemoveInternetScsiStaticTargets 
`xml:"urn:vim25 RemoveInternetScsiStaticTargets,omitempty"` - Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"urn:vim25 RemoveInternetScsiStaticTargetsResponse,omitempty"` + Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"RemoveInternetScsiStaticTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12145,7 +13105,7 @@ func RemoveInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, r type RemoveKeyBody struct { Req *types.RemoveKey `xml:"urn:vim25 RemoveKey,omitempty"` - Res *types.RemoveKeyResponse `xml:"urn:vim25 RemoveKeyResponse,omitempty"` + Res *types.RemoveKeyResponse `xml:"RemoveKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12165,7 +13125,7 @@ func RemoveKey(ctx context.Context, r soap.RoundTripper, req *types.RemoveKey) ( type RemoveKeysBody struct { Req *types.RemoveKeys `xml:"urn:vim25 RemoveKeys,omitempty"` - Res *types.RemoveKeysResponse `xml:"urn:vim25 RemoveKeysResponse,omitempty"` + Res *types.RemoveKeysResponse `xml:"RemoveKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12185,7 +13145,7 @@ func RemoveKeys(ctx context.Context, r soap.RoundTripper, req *types.RemoveKeys) type RemoveKmipServerBody struct { Req *types.RemoveKmipServer `xml:"urn:vim25 RemoveKmipServer,omitempty"` - Res *types.RemoveKmipServerResponse `xml:"urn:vim25 RemoveKmipServerResponse,omitempty"` + Res *types.RemoveKmipServerResponse `xml:"RemoveKmipServerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12205,7 +13165,7 @@ func RemoveKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Remov type RemoveLicenseBody struct { Req *types.RemoveLicense `xml:"urn:vim25 RemoveLicense,omitempty"` - Res *types.RemoveLicenseResponse `xml:"urn:vim25 RemoveLicenseResponse,omitempty"` + Res *types.RemoveLicenseResponse `xml:"RemoveLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12225,7 +13185,7 @@ func RemoveLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveLi type RemoveLicenseLabelBody struct { Req *types.RemoveLicenseLabel `xml:"urn:vim25 RemoveLicenseLabel,omitempty"` - Res *types.RemoveLicenseLabelResponse `xml:"urn:vim25 RemoveLicenseLabelResponse,omitempty"` + Res *types.RemoveLicenseLabelResponse `xml:"RemoveLicenseLabelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12245,7 +13205,7 @@ func RemoveLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.Rem type RemoveMonitoredEntitiesBody struct { Req *types.RemoveMonitoredEntities `xml:"urn:vim25 RemoveMonitoredEntities,omitempty"` - Res *types.RemoveMonitoredEntitiesResponse `xml:"urn:vim25 RemoveMonitoredEntitiesResponse,omitempty"` + Res *types.RemoveMonitoredEntitiesResponse `xml:"RemoveMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12265,7 +13225,7 @@ func RemoveMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *type type RemoveNetworkResourcePoolBody struct { Req *types.RemoveNetworkResourcePool `xml:"urn:vim25 RemoveNetworkResourcePool,omitempty"` - Res *types.RemoveNetworkResourcePoolResponse `xml:"urn:vim25 RemoveNetworkResourcePoolResponse,omitempty"` + Res 
*types.RemoveNetworkResourcePoolResponse `xml:"RemoveNetworkResourcePoolResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12283,9 +13243,29 @@ func RemoveNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *ty
     return resBody.Res, nil
 }

+type RemoveNvmeOverRdmaAdapterBody struct {
+    Req    *types.RemoveNvmeOverRdmaAdapter `xml:"urn:vim25 RemoveNvmeOverRdmaAdapter,omitempty"`
+    Res    *types.RemoveNvmeOverRdmaAdapterResponse `xml:"RemoveNvmeOverRdmaAdapterResponse,omitempty"`
+    Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RemoveNvmeOverRdmaAdapterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RemoveNvmeOverRdmaAdapter(ctx context.Context, r soap.RoundTripper, req *types.RemoveNvmeOverRdmaAdapter) (*types.RemoveNvmeOverRdmaAdapterResponse, error) {
+    var reqBody, resBody RemoveNvmeOverRdmaAdapterBody
+
+    reqBody.Req = req
+
+    if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+        return nil, err
+    }
+
+    return resBody.Res, nil
+}
+
 type RemovePerfIntervalBody struct {
     Req    *types.RemovePerfInterval `xml:"urn:vim25 RemovePerfInterval,omitempty"`
-    Res    *types.RemovePerfIntervalResponse `xml:"urn:vim25 RemovePerfIntervalResponse,omitempty"`
+    Res    *types.RemovePerfIntervalResponse `xml:"RemovePerfIntervalResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12305,7 +13285,7 @@ func RemovePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Rem

 type RemovePortGroupBody struct {
     Req    *types.RemovePortGroup `xml:"urn:vim25 RemovePortGroup,omitempty"`
-    Res    *types.RemovePortGroupResponse `xml:"urn:vim25 RemovePortGroupResponse,omitempty"`
+    Res    *types.RemovePortGroupResponse `xml:"RemovePortGroupResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12325,7 +13305,7 @@ func RemovePortGroup(ctx context.Context, r soap.RoundTripper, req *types.Remove

 type RemoveScheduledTaskBody struct {
     Req    *types.RemoveScheduledTask `xml:"urn:vim25 RemoveScheduledTask,omitempty"`
-    Res    *types.RemoveScheduledTaskResponse `xml:"urn:vim25 RemoveScheduledTaskResponse,omitempty"`
+    Res    *types.RemoveScheduledTaskResponse `xml:"RemoveScheduledTaskResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12345,7 +13325,7 @@ func RemoveScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.Re

 type RemoveServiceConsoleVirtualNicBody struct {
     Req    *types.RemoveServiceConsoleVirtualNic `xml:"urn:vim25 RemoveServiceConsoleVirtualNic,omitempty"`
-    Res    *types.RemoveServiceConsoleVirtualNicResponse `xml:"urn:vim25 RemoveServiceConsoleVirtualNicResponse,omitempty"`
+    Res    *types.RemoveServiceConsoleVirtualNicResponse `xml:"RemoveServiceConsoleVirtualNicResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12365,7 +13345,7 @@ func RemoveServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, re

 type RemoveSmartCardTrustAnchorBody struct {
     Req    *types.RemoveSmartCardTrustAnchor `xml:"urn:vim25 RemoveSmartCardTrustAnchor,omitempty"`
-    Res    *types.RemoveSmartCardTrustAnchorResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorResponse,omitempty"`
+    Res    *types.RemoveSmartCardTrustAnchorResponse `xml:"RemoveSmartCardTrustAnchorResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12385,7 +13365,7 @@ func RemoveSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *t

 type RemoveSmartCardTrustAnchorByFingerprintBody struct {
     Req    *types.RemoveSmartCardTrustAnchorByFingerprint `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprint,omitempty"`
-    Res    *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"`
+    Res    *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12405,7 +13385,7 @@ func RemoveSmartCardTrustAnchorByFingerprint(ctx context.Context, r soap.RoundTr

 type RemoveSnapshot_TaskBody struct {
     Req    *types.RemoveSnapshot_Task `xml:"urn:vim25 RemoveSnapshot_Task,omitempty"`
-    Res    *types.RemoveSnapshot_TaskResponse `xml:"urn:vim25 RemoveSnapshot_TaskResponse,omitempty"`
+    Res    *types.RemoveSnapshot_TaskResponse `xml:"RemoveSnapshot_TaskResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12423,9 +13403,29 @@ func RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.Re
     return resBody.Res, nil
 }

+type RemoveSoftwareAdapterBody struct {
+    Req    *types.RemoveSoftwareAdapter `xml:"urn:vim25 RemoveSoftwareAdapter,omitempty"`
+    Res    *types.RemoveSoftwareAdapterResponse `xml:"RemoveSoftwareAdapterResponse,omitempty"`
+    Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RemoveSoftwareAdapterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RemoveSoftwareAdapter(ctx context.Context, r soap.RoundTripper, req *types.RemoveSoftwareAdapter) (*types.RemoveSoftwareAdapterResponse, error) {
+    var reqBody, resBody RemoveSoftwareAdapterBody
+
+    reqBody.Req = req
+
+    if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+        return nil, err
+    }
+
+    return resBody.Res, nil
+}
+
 type RemoveUserBody struct {
     Req    *types.RemoveUser `xml:"urn:vim25 RemoveUser,omitempty"`
-    Res    *types.RemoveUserResponse `xml:"urn:vim25 RemoveUserResponse,omitempty"`
+    Res    *types.RemoveUserResponse `xml:"RemoveUserResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12445,7 +13445,7 @@ func RemoveUser(ctx context.Context, r soap.RoundTripper, req *types.RemoveUser)

 type RemoveVirtualNicBody struct {
     Req    *types.RemoveVirtualNic `xml:"urn:vim25 RemoveVirtualNic,omitempty"`
-    Res    *types.RemoveVirtualNicResponse `xml:"urn:vim25 RemoveVirtualNicResponse,omitempty"`
+    Res    *types.RemoveVirtualNicResponse `xml:"RemoveVirtualNicResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12465,7 +13465,7 @@ func RemoveVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.Remov

 type RemoveVirtualSwitchBody struct {
     Req    *types.RemoveVirtualSwitch `xml:"urn:vim25 RemoveVirtualSwitch,omitempty"`
-    Res    *types.RemoveVirtualSwitchResponse `xml:"urn:vim25 RemoveVirtualSwitchResponse,omitempty"`
+    Res    *types.RemoveVirtualSwitchResponse `xml:"RemoveVirtualSwitchResponse,omitempty"`
     Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }

@@ -12485,7 +13485,7 @@ func RemoveVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.Re

 type RenameCustomFieldDefBody struct {
     Req    *types.RenameCustomFieldDef `xml:"urn:vim25 RenameCustomFieldDef,omitempty"`
-    Res    *types.RenameCustomFieldDefResponse `xml:"urn:vim25
RenameCustomFieldDefResponse,omitempty"` + Res *types.RenameCustomFieldDefResponse `xml:"RenameCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12505,7 +13505,7 @@ func RenameCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.R type RenameCustomizationSpecBody struct { Req *types.RenameCustomizationSpec `xml:"urn:vim25 RenameCustomizationSpec,omitempty"` - Res *types.RenameCustomizationSpecResponse `xml:"urn:vim25 RenameCustomizationSpecResponse,omitempty"` + Res *types.RenameCustomizationSpecResponse `xml:"RenameCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12525,7 +13525,7 @@ func RenameCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type RenameDatastoreBody struct { Req *types.RenameDatastore `xml:"urn:vim25 RenameDatastore,omitempty"` - Res *types.RenameDatastoreResponse `xml:"urn:vim25 RenameDatastoreResponse,omitempty"` + Res *types.RenameDatastoreResponse `xml:"RenameDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12545,7 +13545,7 @@ func RenameDatastore(ctx context.Context, r soap.RoundTripper, req *types.Rename type RenameSnapshotBody struct { Req *types.RenameSnapshot `xml:"urn:vim25 RenameSnapshot,omitempty"` - Res *types.RenameSnapshotResponse `xml:"urn:vim25 RenameSnapshotResponse,omitempty"` + Res *types.RenameSnapshotResponse `xml:"RenameSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12565,7 +13565,7 @@ func RenameSnapshot(ctx context.Context, r soap.RoundTripper, req *types.RenameS type RenameVStorageObjectBody struct { Req *types.RenameVStorageObject `xml:"urn:vim25 RenameVStorageObject,omitempty"` - Res *types.RenameVStorageObjectResponse `xml:"urn:vim25 RenameVStorageObjectResponse,omitempty"` + Res *types.RenameVStorageObjectResponse `xml:"RenameVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12585,7 +13585,7 @@ func RenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.R type Rename_TaskBody struct { Req *types.Rename_Task `xml:"urn:vim25 Rename_Task,omitempty"` - Res *types.Rename_TaskResponse `xml:"urn:vim25 Rename_TaskResponse,omitempty"` + Res *types.Rename_TaskResponse `xml:"Rename_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12605,7 +13605,7 @@ func Rename_Task(ctx context.Context, r soap.RoundTripper, req *types.Rename_Tas type ReplaceCACertificatesAndCRLsBody struct { Req *types.ReplaceCACertificatesAndCRLs `xml:"urn:vim25 ReplaceCACertificatesAndCRLs,omitempty"` - Res *types.ReplaceCACertificatesAndCRLsResponse `xml:"urn:vim25 ReplaceCACertificatesAndCRLsResponse,omitempty"` + Res *types.ReplaceCACertificatesAndCRLsResponse `xml:"ReplaceCACertificatesAndCRLsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12625,7 +13625,7 @@ func ReplaceCACertificatesAndCRLs(ctx context.Context, r soap.RoundTripper, req type ReplaceSmartCardTrustAnchorsBody struct { Req *types.ReplaceSmartCardTrustAnchors `xml:"urn:vim25 ReplaceSmartCardTrustAnchors,omitempty"` - Res *types.ReplaceSmartCardTrustAnchorsResponse `xml:"urn:vim25 ReplaceSmartCardTrustAnchorsResponse,omitempty"` + Res 
*types.ReplaceSmartCardTrustAnchorsResponse `xml:"ReplaceSmartCardTrustAnchorsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12645,7 +13645,7 @@ func ReplaceSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req type RescanAllHbaBody struct { Req *types.RescanAllHba `xml:"urn:vim25 RescanAllHba,omitempty"` - Res *types.RescanAllHbaResponse `xml:"urn:vim25 RescanAllHbaResponse,omitempty"` + Res *types.RescanAllHbaResponse `xml:"RescanAllHbaResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12665,7 +13665,7 @@ func RescanAllHba(ctx context.Context, r soap.RoundTripper, req *types.RescanAll type RescanHbaBody struct { Req *types.RescanHba `xml:"urn:vim25 RescanHba,omitempty"` - Res *types.RescanHbaResponse `xml:"urn:vim25 RescanHbaResponse,omitempty"` + Res *types.RescanHbaResponse `xml:"RescanHbaResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12685,7 +13685,7 @@ func RescanHba(ctx context.Context, r soap.RoundTripper, req *types.RescanHba) ( type RescanVffsBody struct { Req *types.RescanVffs `xml:"urn:vim25 RescanVffs,omitempty"` - Res *types.RescanVffsResponse `xml:"urn:vim25 RescanVffsResponse,omitempty"` + Res *types.RescanVffsResponse `xml:"RescanVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12705,7 +13705,7 @@ func RescanVffs(ctx context.Context, r soap.RoundTripper, req *types.RescanVffs) type RescanVmfsBody struct { Req *types.RescanVmfs `xml:"urn:vim25 RescanVmfs,omitempty"` - Res *types.RescanVmfsResponse `xml:"urn:vim25 RescanVmfsResponse,omitempty"` + Res *types.RescanVmfsResponse `xml:"RescanVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12725,7 +13725,7 @@ func RescanVmfs(ctx context.Context, r soap.RoundTripper, req *types.RescanVmfs) type ResetCollectorBody struct { Req *types.ResetCollector `xml:"urn:vim25 ResetCollector,omitempty"` - Res *types.ResetCollectorResponse `xml:"urn:vim25 ResetCollectorResponse,omitempty"` + Res *types.ResetCollectorResponse `xml:"ResetCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12745,7 +13745,7 @@ func ResetCollector(ctx context.Context, r soap.RoundTripper, req *types.ResetCo type ResetCounterLevelMappingBody struct { Req *types.ResetCounterLevelMapping `xml:"urn:vim25 ResetCounterLevelMapping,omitempty"` - Res *types.ResetCounterLevelMappingResponse `xml:"urn:vim25 ResetCounterLevelMappingResponse,omitempty"` + Res *types.ResetCounterLevelMappingResponse `xml:"ResetCounterLevelMappingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12765,7 +13765,7 @@ func ResetCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *typ type ResetEntityPermissionsBody struct { Req *types.ResetEntityPermissions `xml:"urn:vim25 ResetEntityPermissions,omitempty"` - Res *types.ResetEntityPermissionsResponse `xml:"urn:vim25 ResetEntityPermissionsResponse,omitempty"` + Res *types.ResetEntityPermissionsResponse `xml:"ResetEntityPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12785,7 +13785,7 @@ func ResetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types type 
ResetFirmwareToFactoryDefaultsBody struct { Req *types.ResetFirmwareToFactoryDefaults `xml:"urn:vim25 ResetFirmwareToFactoryDefaults,omitempty"` - Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"urn:vim25 ResetFirmwareToFactoryDefaultsResponse,omitempty"` + Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"ResetFirmwareToFactoryDefaultsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12805,7 +13805,7 @@ func ResetFirmwareToFactoryDefaults(ctx context.Context, r soap.RoundTripper, re type ResetGuestInformationBody struct { Req *types.ResetGuestInformation `xml:"urn:vim25 ResetGuestInformation,omitempty"` - Res *types.ResetGuestInformationResponse `xml:"urn:vim25 ResetGuestInformationResponse,omitempty"` + Res *types.ResetGuestInformationResponse `xml:"ResetGuestInformationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12825,7 +13825,7 @@ func ResetGuestInformation(ctx context.Context, r soap.RoundTripper, req *types. type ResetListViewBody struct { Req *types.ResetListView `xml:"urn:vim25 ResetListView,omitempty"` - Res *types.ResetListViewResponse `xml:"urn:vim25 ResetListViewResponse,omitempty"` + Res *types.ResetListViewResponse `xml:"ResetListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12845,7 +13845,7 @@ func ResetListView(ctx context.Context, r soap.RoundTripper, req *types.ResetLis type ResetListViewFromViewBody struct { Req *types.ResetListViewFromView `xml:"urn:vim25 ResetListViewFromView,omitempty"` - Res *types.ResetListViewFromViewResponse `xml:"urn:vim25 ResetListViewFromViewResponse,omitempty"` + Res *types.ResetListViewFromViewResponse `xml:"ResetListViewFromViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12865,7 +13865,7 @@ func ResetListViewFromView(ctx context.Context, r soap.RoundTripper, req *types. type ResetSystemHealthInfoBody struct { Req *types.ResetSystemHealthInfo `xml:"urn:vim25 ResetSystemHealthInfo,omitempty"` - Res *types.ResetSystemHealthInfoResponse `xml:"urn:vim25 ResetSystemHealthInfoResponse,omitempty"` + Res *types.ResetSystemHealthInfoResponse `xml:"ResetSystemHealthInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12885,7 +13885,7 @@ func ResetSystemHealthInfo(ctx context.Context, r soap.RoundTripper, req *types. 
type ResetVM_TaskBody struct { Req *types.ResetVM_Task `xml:"urn:vim25 ResetVM_Task,omitempty"` - Res *types.ResetVM_TaskResponse `xml:"urn:vim25 ResetVM_TaskResponse,omitempty"` + Res *types.ResetVM_TaskResponse `xml:"ResetVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12905,7 +13905,7 @@ func ResetVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ResetVM_T type ResignatureUnresolvedVmfsVolume_TaskBody struct { Req *types.ResignatureUnresolvedVmfsVolume_Task `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_Task,omitempty"` - Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"` + Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12925,7 +13925,7 @@ func ResignatureUnresolvedVmfsVolume_Task(ctx context.Context, r soap.RoundTripp type ResolveInstallationErrorsOnCluster_TaskBody struct { Req *types.ResolveInstallationErrorsOnCluster_Task `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_Task,omitempty"` - Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"` + Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12945,7 +13945,7 @@ func ResolveInstallationErrorsOnCluster_Task(ctx context.Context, r soap.RoundTr type ResolveInstallationErrorsOnHost_TaskBody struct { Req *types.ResolveInstallationErrorsOnHost_Task `xml:"urn:vim25 ResolveInstallationErrorsOnHost_Task,omitempty"` - Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnHost_TaskResponse,omitempty"` + Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"ResolveInstallationErrorsOnHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12965,7 +13965,7 @@ func ResolveInstallationErrorsOnHost_Task(ctx context.Context, r soap.RoundTripp type ResolveMultipleUnresolvedVmfsVolumesBody struct { Req *types.ResolveMultipleUnresolvedVmfsVolumes `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumes,omitempty"` - Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12985,7 +13985,7 @@ func ResolveMultipleUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripp type ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody struct { Req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_Task,omitempty"` - Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13005,7 +14005,7 @@ func ResolveMultipleUnresolvedVmfsVolumesEx_Task(ctx 
context.Context, r soap.Rou type RestartServiceBody struct { Req *types.RestartService `xml:"urn:vim25 RestartService,omitempty"` - Res *types.RestartServiceResponse `xml:"urn:vim25 RestartServiceResponse,omitempty"` + Res *types.RestartServiceResponse `xml:"RestartServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13025,7 +14025,7 @@ func RestartService(ctx context.Context, r soap.RoundTripper, req *types.Restart type RestartServiceConsoleVirtualNicBody struct { Req *types.RestartServiceConsoleVirtualNic `xml:"urn:vim25 RestartServiceConsoleVirtualNic,omitempty"` - Res *types.RestartServiceConsoleVirtualNicResponse `xml:"urn:vim25 RestartServiceConsoleVirtualNicResponse,omitempty"` + Res *types.RestartServiceConsoleVirtualNicResponse `xml:"RestartServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13045,7 +14045,7 @@ func RestartServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, r type RestoreFirmwareConfigurationBody struct { Req *types.RestoreFirmwareConfiguration `xml:"urn:vim25 RestoreFirmwareConfiguration,omitempty"` - Res *types.RestoreFirmwareConfigurationResponse `xml:"urn:vim25 RestoreFirmwareConfigurationResponse,omitempty"` + Res *types.RestoreFirmwareConfigurationResponse `xml:"RestoreFirmwareConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13065,7 +14065,7 @@ func RestoreFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req type RetrieveAllPermissionsBody struct { Req *types.RetrieveAllPermissions `xml:"urn:vim25 RetrieveAllPermissions,omitempty"` - Res *types.RetrieveAllPermissionsResponse `xml:"urn:vim25 RetrieveAllPermissionsResponse,omitempty"` + Res *types.RetrieveAllPermissionsResponse `xml:"RetrieveAllPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13085,7 +14085,7 @@ func RetrieveAllPermissions(ctx context.Context, r soap.RoundTripper, req *types type RetrieveAnswerFileBody struct { Req *types.RetrieveAnswerFile `xml:"urn:vim25 RetrieveAnswerFile,omitempty"` - Res *types.RetrieveAnswerFileResponse `xml:"urn:vim25 RetrieveAnswerFileResponse,omitempty"` + Res *types.RetrieveAnswerFileResponse `xml:"RetrieveAnswerFileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13105,7 +14105,7 @@ func RetrieveAnswerFile(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrieveAnswerFileForProfileBody struct { Req *types.RetrieveAnswerFileForProfile `xml:"urn:vim25 RetrieveAnswerFileForProfile,omitempty"` - Res *types.RetrieveAnswerFileForProfileResponse `xml:"urn:vim25 RetrieveAnswerFileForProfileResponse,omitempty"` + Res *types.RetrieveAnswerFileForProfileResponse `xml:"RetrieveAnswerFileForProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13125,7 +14125,7 @@ func RetrieveAnswerFileForProfile(ctx context.Context, r soap.RoundTripper, req type RetrieveArgumentDescriptionBody struct { Req *types.RetrieveArgumentDescription `xml:"urn:vim25 RetrieveArgumentDescription,omitempty"` - Res *types.RetrieveArgumentDescriptionResponse `xml:"urn:vim25 RetrieveArgumentDescriptionResponse,omitempty"` + Res *types.RetrieveArgumentDescriptionResponse `xml:"RetrieveArgumentDescriptionResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13145,7 +14145,7 @@ func RetrieveArgumentDescription(ctx context.Context, r soap.RoundTripper, req * type RetrieveClientCertBody struct { Req *types.RetrieveClientCert `xml:"urn:vim25 RetrieveClientCert,omitempty"` - Res *types.RetrieveClientCertResponse `xml:"urn:vim25 RetrieveClientCertResponse,omitempty"` + Res *types.RetrieveClientCertResponse `xml:"RetrieveClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13165,7 +14165,7 @@ func RetrieveClientCert(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrieveClientCsrBody struct { Req *types.RetrieveClientCsr `xml:"urn:vim25 RetrieveClientCsr,omitempty"` - Res *types.RetrieveClientCsrResponse `xml:"urn:vim25 RetrieveClientCsrResponse,omitempty"` + Res *types.RetrieveClientCsrResponse `xml:"RetrieveClientCsrResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13185,7 +14185,7 @@ func RetrieveClientCsr(ctx context.Context, r soap.RoundTripper, req *types.Retr type RetrieveDasAdvancedRuntimeInfoBody struct { Req *types.RetrieveDasAdvancedRuntimeInfo `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfo,omitempty"` - Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfoResponse,omitempty"` + Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"RetrieveDasAdvancedRuntimeInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13205,7 +14205,7 @@ func RetrieveDasAdvancedRuntimeInfo(ctx context.Context, r soap.RoundTripper, re type RetrieveDescriptionBody struct { Req *types.RetrieveDescription `xml:"urn:vim25 RetrieveDescription,omitempty"` - Res *types.RetrieveDescriptionResponse `xml:"urn:vim25 RetrieveDescriptionResponse,omitempty"` + Res *types.RetrieveDescriptionResponse `xml:"RetrieveDescriptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13225,7 +14225,7 @@ func RetrieveDescription(ctx context.Context, r soap.RoundTripper, req *types.Re type RetrieveDiskPartitionInfoBody struct { Req *types.RetrieveDiskPartitionInfo `xml:"urn:vim25 RetrieveDiskPartitionInfo,omitempty"` - Res *types.RetrieveDiskPartitionInfoResponse `xml:"urn:vim25 RetrieveDiskPartitionInfoResponse,omitempty"` + Res *types.RetrieveDiskPartitionInfoResponse `xml:"RetrieveDiskPartitionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13243,9 +14243,29 @@ func RetrieveDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *ty return resBody.Res, nil } +type RetrieveDynamicPassthroughInfoBody struct { + Req *types.RetrieveDynamicPassthroughInfo `xml:"urn:vim25 RetrieveDynamicPassthroughInfo,omitempty"` + Res *types.RetrieveDynamicPassthroughInfoResponse `xml:"RetrieveDynamicPassthroughInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveDynamicPassthroughInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDynamicPassthroughInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDynamicPassthroughInfo) (*types.RetrieveDynamicPassthroughInfoResponse, error) { + var reqBody, resBody RetrieveDynamicPassthroughInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + 
+	return resBody.Res, nil
+}
+
 type RetrieveEntityPermissionsBody struct {
 	Req *types.RetrieveEntityPermissions `xml:"urn:vim25 RetrieveEntityPermissions,omitempty"`
-	Res *types.RetrieveEntityPermissionsResponse `xml:"urn:vim25 RetrieveEntityPermissionsResponse,omitempty"`
+	Res *types.RetrieveEntityPermissionsResponse `xml:"RetrieveEntityPermissionsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13265,7 +14285,7 @@ func RetrieveEntityPermissions(ctx context.Context, r soap.RoundTripper, req *ty
 
 type RetrieveEntityScheduledTaskBody struct {
 	Req *types.RetrieveEntityScheduledTask `xml:"urn:vim25 RetrieveEntityScheduledTask,omitempty"`
-	Res *types.RetrieveEntityScheduledTaskResponse `xml:"urn:vim25 RetrieveEntityScheduledTaskResponse,omitempty"`
+	Res *types.RetrieveEntityScheduledTaskResponse `xml:"RetrieveEntityScheduledTaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13283,9 +14303,29 @@ func RetrieveEntityScheduledTask(ctx context.Context, r soap.RoundTripper, req *
 	return resBody.Res, nil
 }
 
+type RetrieveFreeEpcMemoryBody struct {
+	Req *types.RetrieveFreeEpcMemory `xml:"urn:vim25 RetrieveFreeEpcMemory,omitempty"`
+	Res *types.RetrieveFreeEpcMemoryResponse `xml:"RetrieveFreeEpcMemoryResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveFreeEpcMemoryBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveFreeEpcMemory(ctx context.Context, r soap.RoundTripper, req *types.RetrieveFreeEpcMemory) (*types.RetrieveFreeEpcMemoryResponse, error) {
+	var reqBody, resBody RetrieveFreeEpcMemoryBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type RetrieveHardwareUptimeBody struct {
 	Req *types.RetrieveHardwareUptime `xml:"urn:vim25 RetrieveHardwareUptime,omitempty"`
-	Res *types.RetrieveHardwareUptimeResponse `xml:"urn:vim25 RetrieveHardwareUptimeResponse,omitempty"`
+	Res *types.RetrieveHardwareUptimeResponse `xml:"RetrieveHardwareUptimeResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13305,7 +14345,7 @@ func RetrieveHardwareUptime(ctx context.Context, r soap.RoundTripper, req *types
 
 type RetrieveHostAccessControlEntriesBody struct {
 	Req *types.RetrieveHostAccessControlEntries `xml:"urn:vim25 RetrieveHostAccessControlEntries,omitempty"`
-	Res *types.RetrieveHostAccessControlEntriesResponse `xml:"urn:vim25 RetrieveHostAccessControlEntriesResponse,omitempty"`
+	Res *types.RetrieveHostAccessControlEntriesResponse `xml:"RetrieveHostAccessControlEntriesResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13325,7 +14365,7 @@ func RetrieveHostAccessControlEntries(ctx context.Context, r soap.RoundTripper,
 
 type RetrieveHostCustomizationsBody struct {
 	Req *types.RetrieveHostCustomizations `xml:"urn:vim25 RetrieveHostCustomizations,omitempty"`
-	Res *types.RetrieveHostCustomizationsResponse `xml:"urn:vim25 RetrieveHostCustomizationsResponse,omitempty"`
+	Res *types.RetrieveHostCustomizationsResponse `xml:"RetrieveHostCustomizationsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13345,7 +14385,7 @@ func RetrieveHostCustomizations(ctx context.Context, r soap.RoundTripper, req *t
 
 type RetrieveHostCustomizationsForProfileBody struct {
 	Req *types.RetrieveHostCustomizationsForProfile `xml:"urn:vim25 RetrieveHostCustomizationsForProfile,omitempty"`
-	Res *types.RetrieveHostCustomizationsForProfileResponse `xml:"urn:vim25 RetrieveHostCustomizationsForProfileResponse,omitempty"`
+	Res *types.RetrieveHostCustomizationsForProfileResponse `xml:"RetrieveHostCustomizationsForProfileResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13365,7 +14405,7 @@ func RetrieveHostCustomizationsForProfile(ctx context.Context, r soap.RoundTripp
 
 type RetrieveHostSpecificationBody struct {
 	Req *types.RetrieveHostSpecification `xml:"urn:vim25 RetrieveHostSpecification,omitempty"`
-	Res *types.RetrieveHostSpecificationResponse `xml:"urn:vim25 RetrieveHostSpecificationResponse,omitempty"`
+	Res *types.RetrieveHostSpecificationResponse `xml:"RetrieveHostSpecificationResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13385,7 +14425,7 @@ func RetrieveHostSpecification(ctx context.Context, r soap.RoundTripper, req *ty
 
 type RetrieveKmipServerCertBody struct {
 	Req *types.RetrieveKmipServerCert `xml:"urn:vim25 RetrieveKmipServerCert,omitempty"`
-	Res *types.RetrieveKmipServerCertResponse `xml:"urn:vim25 RetrieveKmipServerCertResponse,omitempty"`
+	Res *types.RetrieveKmipServerCertResponse `xml:"RetrieveKmipServerCertResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13405,7 +14445,7 @@ func RetrieveKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types
 
 type RetrieveKmipServersStatus_TaskBody struct {
 	Req *types.RetrieveKmipServersStatus_Task `xml:"urn:vim25 RetrieveKmipServersStatus_Task,omitempty"`
-	Res *types.RetrieveKmipServersStatus_TaskResponse `xml:"urn:vim25 RetrieveKmipServersStatus_TaskResponse,omitempty"`
+	Res *types.RetrieveKmipServersStatus_TaskResponse `xml:"RetrieveKmipServersStatus_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13425,7 +14465,7 @@ func RetrieveKmipServersStatus_Task(ctx context.Context, r soap.RoundTripper, re
 
 type RetrieveObjectScheduledTaskBody struct {
 	Req *types.RetrieveObjectScheduledTask `xml:"urn:vim25 RetrieveObjectScheduledTask,omitempty"`
-	Res *types.RetrieveObjectScheduledTaskResponse `xml:"urn:vim25 RetrieveObjectScheduledTaskResponse,omitempty"`
+	Res *types.RetrieveObjectScheduledTaskResponse `xml:"RetrieveObjectScheduledTaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13445,7 +14485,7 @@ func RetrieveObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *
 
 type RetrieveProductComponentsBody struct {
 	Req *types.RetrieveProductComponents `xml:"urn:vim25 RetrieveProductComponents,omitempty"`
-	Res *types.RetrieveProductComponentsResponse `xml:"urn:vim25 RetrieveProductComponentsResponse,omitempty"`
+	Res *types.RetrieveProductComponentsResponse `xml:"RetrieveProductComponentsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13465,7 +14505,7 @@ func RetrieveProductComponents(ctx context.Context, r soap.RoundTripper, req *ty
 
 type RetrievePropertiesBody struct {
 	Req *types.RetrieveProperties `xml:"urn:vim25 RetrieveProperties,omitempty"`
-	Res *types.RetrievePropertiesResponse `xml:"urn:vim25 RetrievePropertiesResponse,omitempty"`
+	Res *types.RetrievePropertiesResponse `xml:"RetrievePropertiesResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13485,7 +14525,7 @@ func RetrieveProperties(ctx context.Context, r soap.RoundTripper, req *types.Ret
 
 type RetrievePropertiesExBody struct {
 	Req *types.RetrievePropertiesEx `xml:"urn:vim25 RetrievePropertiesEx,omitempty"`
-	Res *types.RetrievePropertiesExResponse `xml:"urn:vim25 RetrievePropertiesExResponse,omitempty"`
+	Res *types.RetrievePropertiesExResponse `xml:"RetrievePropertiesExResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13505,14 +14545,54 @@ func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.R
 
 type RetrieveRolePermissionsBody struct {
 	Req *types.RetrieveRolePermissions `xml:"urn:vim25 RetrieveRolePermissions,omitempty"`
-	Res *types.RetrieveRolePermissionsResponse `xml:"urn:vim25 RetrieveRolePermissionsResponse,omitempty"`
+	Res *types.RetrieveRolePermissionsResponse `xml:"RetrieveRolePermissionsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
 func (b *RetrieveRolePermissionsBody) Fault() *soap.Fault { return b.Fault_ }
 
-func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveRolePermissions) (*types.RetrieveRolePermissionsResponse, error) {
-	var reqBody, resBody RetrieveRolePermissionsBody
+func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveRolePermissions) (*types.RetrieveRolePermissionsResponse, error) {
+	var reqBody, resBody RetrieveRolePermissionsBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveSelfSignedClientCertBody struct {
+	Req *types.RetrieveSelfSignedClientCert `xml:"urn:vim25 RetrieveSelfSignedClientCert,omitempty"`
+	Res *types.RetrieveSelfSignedClientCertResponse `xml:"RetrieveSelfSignedClientCertResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSelfSignedClientCert) (*types.RetrieveSelfSignedClientCertResponse, error) {
+	var reqBody, resBody RetrieveSelfSignedClientCertBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveServiceContentBody struct {
+	Req *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"`
+	Res *types.RetrieveServiceContentResponse `xml:"RetrieveServiceContentResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveServiceContentBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceContent) (*types.RetrieveServiceContentResponse, error) {
+	var reqBody, resBody RetrieveServiceContentBody
 
 	reqBody.Req = req
 
@@ -13523,16 +14603,16 @@ func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *type
 	return resBody.Res, nil
 }
 
-type RetrieveSelfSignedClientCertBody struct {
-	Req *types.RetrieveSelfSignedClientCert `xml:"urn:vim25 RetrieveSelfSignedClientCert,omitempty"`
-	Res *types.RetrieveSelfSignedClientCertResponse `xml:"urn:vim25 RetrieveSelfSignedClientCertResponse,omitempty"`
-	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+type RetrieveServiceProviderEntitiesBody struct {
+	Req *types.RetrieveServiceProviderEntities `xml:"urn:vim25 RetrieveServiceProviderEntities,omitempty"`
+	Res *types.RetrieveServiceProviderEntitiesResponse `xml:"RetrieveServiceProviderEntitiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
-func (b *RetrieveSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ }
+func (b *RetrieveServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ }
 
-func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSelfSignedClientCert) (*types.RetrieveSelfSignedClientCertResponse, error) {
-	var reqBody, resBody RetrieveSelfSignedClientCertBody
+func RetrieveServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceProviderEntities) (*types.RetrieveServiceProviderEntitiesResponse, error) {
+	var reqBody, resBody RetrieveServiceProviderEntitiesBody
 
 	reqBody.Req = req
 
@@ -13543,16 +14623,16 @@ func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req
 	return resBody.Res, nil
 }
 
-type RetrieveServiceContentBody struct {
-	Req *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"`
-	Res *types.RetrieveServiceContentResponse `xml:"urn:vim25 RetrieveServiceContentResponse,omitempty"`
-	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+type RetrieveSnapshotDetailsBody struct {
+	Req *types.RetrieveSnapshotDetails `xml:"urn:vim25 RetrieveSnapshotDetails,omitempty"`
+	Res *types.RetrieveSnapshotDetailsResponse `xml:"RetrieveSnapshotDetailsResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
-func (b *RetrieveServiceContentBody) Fault() *soap.Fault { return b.Fault_ }
+func (b *RetrieveSnapshotDetailsBody) Fault() *soap.Fault { return b.Fault_ }
 
-func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceContent) (*types.RetrieveServiceContentResponse, error) {
-	var reqBody, resBody RetrieveServiceContentBody
+func RetrieveSnapshotDetails(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSnapshotDetails) (*types.RetrieveSnapshotDetailsResponse, error) {
+	var reqBody, resBody RetrieveSnapshotDetailsBody
 
 	reqBody.Req = req
 
@@ -13565,7 +14645,7 @@ func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types
 
 type RetrieveSnapshotInfoBody struct {
 	Req *types.RetrieveSnapshotInfo `xml:"urn:vim25 RetrieveSnapshotInfo,omitempty"`
-	Res *types.RetrieveSnapshotInfoResponse `xml:"urn:vim25 RetrieveSnapshotInfoResponse,omitempty"`
+	Res *types.RetrieveSnapshotInfoResponse `xml:"RetrieveSnapshotInfoResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13585,7 +14665,7 @@ func RetrieveSnapshotInfo(ctx context.Context, r soap.RoundTripper, req *types.R
 
 type RetrieveUserGroupsBody struct {
 	Req *types.RetrieveUserGroups `xml:"urn:vim25 RetrieveUserGroups,omitempty"`
-	Res *types.RetrieveUserGroupsResponse `xml:"urn:vim25 RetrieveUserGroupsResponse,omitempty"`
+	Res *types.RetrieveUserGroupsResponse `xml:"RetrieveUserGroupsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13605,7 +14685,7 @@ func RetrieveUserGroups(ctx context.Context, r soap.RoundTripper, req *types.Ret
 
 type RetrieveVStorageInfrastructureObjectPolicyBody struct {
 	Req *types.RetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicy,omitempty"`
-	Res *types.RetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"`
+	Res *types.RetrieveVStorageInfrastructureObjectPolicyResponse `xml:"RetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13625,7 +14705,7 @@ func RetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap.Roun
 
 type RetrieveVStorageObjectBody struct {
 	Req *types.RetrieveVStorageObject `xml:"urn:vim25 RetrieveVStorageObject,omitempty"`
-	Res *types.RetrieveVStorageObjectResponse `xml:"urn:vim25 RetrieveVStorageObjectResponse,omitempty"`
+	Res *types.RetrieveVStorageObjectResponse `xml:"RetrieveVStorageObjectResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13645,7 +14725,7 @@ func RetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types
 
 type RetrieveVStorageObjectAssociationsBody struct {
 	Req *types.RetrieveVStorageObjectAssociations `xml:"urn:vim25 RetrieveVStorageObjectAssociations,omitempty"`
-	Res *types.RetrieveVStorageObjectAssociationsResponse `xml:"urn:vim25 RetrieveVStorageObjectAssociationsResponse,omitempty"`
+	Res *types.RetrieveVStorageObjectAssociationsResponse `xml:"RetrieveVStorageObjectAssociationsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13665,7 +14745,7 @@ func RetrieveVStorageObjectAssociations(ctx context.Context, r soap.RoundTripper
 
 type RetrieveVStorageObjectStateBody struct {
 	Req *types.RetrieveVStorageObjectState `xml:"urn:vim25 RetrieveVStorageObjectState,omitempty"`
-	Res *types.RetrieveVStorageObjectStateResponse `xml:"urn:vim25 RetrieveVStorageObjectStateResponse,omitempty"`
+	Res *types.RetrieveVStorageObjectStateResponse `xml:"RetrieveVStorageObjectStateResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13683,9 +14763,69 @@ func RetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req *
 	return resBody.Res, nil
 }
 
+type RetrieveVendorDeviceGroupInfoBody struct {
+	Req *types.RetrieveVendorDeviceGroupInfo `xml:"urn:vim25 RetrieveVendorDeviceGroupInfo,omitempty"`
+	Res *types.RetrieveVendorDeviceGroupInfoResponse `xml:"RetrieveVendorDeviceGroupInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveVendorDeviceGroupInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveVendorDeviceGroupInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVendorDeviceGroupInfo) (*types.RetrieveVendorDeviceGroupInfoResponse, error) {
+	var reqBody, resBody RetrieveVendorDeviceGroupInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveVgpuDeviceInfoBody struct {
+	Req *types.RetrieveVgpuDeviceInfo `xml:"urn:vim25 RetrieveVgpuDeviceInfo,omitempty"`
+	Res *types.RetrieveVgpuDeviceInfoResponse `xml:"RetrieveVgpuDeviceInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveVgpuDeviceInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveVgpuDeviceInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVgpuDeviceInfo) (*types.RetrieveVgpuDeviceInfoResponse, error) {
+	var reqBody, resBody RetrieveVgpuDeviceInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type RetrieveVgpuProfileInfoBody struct {
+	Req *types.RetrieveVgpuProfileInfo `xml:"urn:vim25 RetrieveVgpuProfileInfo,omitempty"`
+	Res *types.RetrieveVgpuProfileInfoResponse `xml:"RetrieveVgpuProfileInfoResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *RetrieveVgpuProfileInfoBody) Fault() *soap.Fault { return b.Fault_ }
+
+func RetrieveVgpuProfileInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVgpuProfileInfo) (*types.RetrieveVgpuProfileInfoResponse, error) {
+	var reqBody, resBody RetrieveVgpuProfileInfoBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type RevertToCurrentSnapshot_TaskBody struct {
 	Req *types.RevertToCurrentSnapshot_Task `xml:"urn:vim25 RevertToCurrentSnapshot_Task,omitempty"`
-	Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"urn:vim25 RevertToCurrentSnapshot_TaskResponse,omitempty"`
+	Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"RevertToCurrentSnapshot_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13705,7 +14845,7 @@ func RevertToCurrentSnapshot_Task(ctx context.Context, r soap.RoundTripper, req
 
 type RevertToSnapshot_TaskBody struct {
 	Req *types.RevertToSnapshot_Task `xml:"urn:vim25 RevertToSnapshot_Task,omitempty"`
-	Res *types.RevertToSnapshot_TaskResponse `xml:"urn:vim25 RevertToSnapshot_TaskResponse,omitempty"`
+	Res *types.RevertToSnapshot_TaskResponse `xml:"RevertToSnapshot_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13725,7 +14865,7 @@ func RevertToSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.
 
 type RevertVStorageObject_TaskBody struct {
 	Req *types.RevertVStorageObject_Task `xml:"urn:vim25 RevertVStorageObject_Task,omitempty"`
-	Res *types.RevertVStorageObject_TaskResponse `xml:"urn:vim25 RevertVStorageObject_TaskResponse,omitempty"`
+	Res *types.RevertVStorageObject_TaskResponse `xml:"RevertVStorageObject_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13745,7 +14885,7 @@ func RevertVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *ty
 
 type RewindCollectorBody struct {
 	Req *types.RewindCollector `xml:"urn:vim25 RewindCollector,omitempty"`
-	Res *types.RewindCollectorResponse `xml:"urn:vim25 RewindCollectorResponse,omitempty"`
+	Res *types.RewindCollectorResponse `xml:"RewindCollectorResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13765,7 +14905,7 @@ func RewindCollector(ctx context.Context, r soap.RoundTripper, req *types.Rewind
 
 type RunScheduledTaskBody struct {
 	Req *types.RunScheduledTask `xml:"urn:vim25 RunScheduledTask,omitempty"`
-	Res *types.RunScheduledTaskResponse `xml:"urn:vim25 RunScheduledTaskResponse,omitempty"`
+	Res *types.RunScheduledTaskResponse `xml:"RunScheduledTaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13785,7 +14925,7 @@ func RunScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RunSc
 
 type RunVsanPhysicalDiskDiagnosticsBody struct {
 	Req *types.RunVsanPhysicalDiskDiagnostics `xml:"urn:vim25 RunVsanPhysicalDiskDiagnostics,omitempty"`
-	Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"urn:vim25 RunVsanPhysicalDiskDiagnosticsResponse,omitempty"`
+	Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"RunVsanPhysicalDiskDiagnosticsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13805,7 +14945,7 @@ func RunVsanPhysicalDiskDiagnostics(ctx context.Context, r soap.RoundTripper, re
 
 type ScanHostPatchV2_TaskBody struct {
 	Req *types.ScanHostPatchV2_Task `xml:"urn:vim25 ScanHostPatchV2_Task,omitempty"`
-	Res *types.ScanHostPatchV2_TaskResponse `xml:"urn:vim25 ScanHostPatchV2_TaskResponse,omitempty"`
+	Res *types.ScanHostPatchV2_TaskResponse `xml:"ScanHostPatchV2_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13825,7 +14965,7 @@ func ScanHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.S
 
 type ScanHostPatch_TaskBody struct {
 	Req *types.ScanHostPatch_Task `xml:"urn:vim25 ScanHostPatch_Task,omitempty"`
-	Res *types.ScanHostPatch_TaskResponse `xml:"urn:vim25 ScanHostPatch_TaskResponse,omitempty"`
+	Res *types.ScanHostPatch_TaskResponse `xml:"ScanHostPatch_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13845,7 +14985,7 @@ func ScanHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Sca
 
 type ScheduleReconcileDatastoreInventoryBody struct {
 	Req *types.ScheduleReconcileDatastoreInventory `xml:"urn:vim25 ScheduleReconcileDatastoreInventory,omitempty"`
-	Res *types.ScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 ScheduleReconcileDatastoreInventoryResponse,omitempty"`
+	Res *types.ScheduleReconcileDatastoreInventoryResponse `xml:"ScheduleReconcileDatastoreInventoryResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13865,7 +15005,7 @@ func ScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTrippe
 
 type SearchDatastoreSubFolders_TaskBody struct {
 	Req *types.SearchDatastoreSubFolders_Task `xml:"urn:vim25 SearchDatastoreSubFolders_Task,omitempty"`
-	Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"urn:vim25 SearchDatastoreSubFolders_TaskResponse,omitempty"`
+	Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"SearchDatastoreSubFolders_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13885,7 +15025,7 @@ func SearchDatastoreSubFolders_Task(ctx context.Context, r soap.RoundTripper, re
 
 type SearchDatastore_TaskBody struct {
 	Req *types.SearchDatastore_Task `xml:"urn:vim25 SearchDatastore_Task,omitempty"`
-	Res *types.SearchDatastore_TaskResponse `xml:"urn:vim25 SearchDatastore_TaskResponse,omitempty"`
+	Res *types.SearchDatastore_TaskResponse `xml:"SearchDatastore_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13905,7 +15045,7 @@ func SearchDatastore_Task(ctx context.Context, r soap.RoundTripper, req *types.S
 
 type SelectActivePartitionBody struct {
 	Req *types.SelectActivePartition `xml:"urn:vim25 SelectActivePartition,omitempty"`
-	Res *types.SelectActivePartitionResponse `xml:"urn:vim25 SelectActivePartitionResponse,omitempty"`
+	Res *types.SelectActivePartitionResponse `xml:"SelectActivePartitionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13925,7 +15065,7 @@ func SelectActivePartition(ctx context.Context, r soap.RoundTripper, req *types.
 
 type SelectVnicBody struct {
 	Req *types.SelectVnic `xml:"urn:vim25 SelectVnic,omitempty"`
-	Res *types.SelectVnicResponse `xml:"urn:vim25 SelectVnicResponse,omitempty"`
+	Res *types.SelectVnicResponse `xml:"SelectVnicResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13945,7 +15085,7 @@ func SelectVnic(ctx context.Context, r soap.RoundTripper, req *types.SelectVnic)
 
 type SelectVnicForNicTypeBody struct {
 	Req *types.SelectVnicForNicType `xml:"urn:vim25 SelectVnicForNicType,omitempty"`
-	Res *types.SelectVnicForNicTypeResponse `xml:"urn:vim25 SelectVnicForNicTypeResponse,omitempty"`
+	Res *types.SelectVnicForNicTypeResponse `xml:"SelectVnicForNicTypeResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13965,7 +15105,7 @@ func SelectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.S
 
 type SendNMIBody struct {
 	Req *types.SendNMI `xml:"urn:vim25 SendNMI,omitempty"`
-	Res *types.SendNMIResponse `xml:"urn:vim25 SendNMIResponse,omitempty"`
+	Res *types.SendNMIResponse `xml:"SendNMIResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -13985,7 +15125,7 @@ func SendNMI(ctx context.Context, r soap.RoundTripper, req *types.SendNMI) (*typ
 
 type SendTestNotificationBody struct {
 	Req *types.SendTestNotification `xml:"urn:vim25 SendTestNotification,omitempty"`
-	Res *types.SendTestNotificationResponse `xml:"urn:vim25 SendTestNotificationResponse,omitempty"`
+	Res *types.SendTestNotificationResponse `xml:"SendTestNotificationResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14005,7 +15145,7 @@ func SendTestNotification(ctx context.Context, r soap.RoundTripper, req *types.S
 
 type SessionIsActiveBody struct {
 	Req *types.SessionIsActive `xml:"urn:vim25 SessionIsActive,omitempty"`
-	Res *types.SessionIsActiveResponse `xml:"urn:vim25 SessionIsActiveResponse,omitempty"`
+	Res *types.SessionIsActiveResponse `xml:"SessionIsActiveResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14025,7 +15165,7 @@ func SessionIsActive(ctx context.Context, r soap.RoundTripper, req *types.Sessio
 
 type SetCollectorPageSizeBody struct {
 	Req *types.SetCollectorPageSize `xml:"urn:vim25 SetCollectorPageSize,omitempty"`
-	Res *types.SetCollectorPageSizeResponse `xml:"urn:vim25 SetCollectorPageSizeResponse,omitempty"`
+	Res *types.SetCollectorPageSizeResponse `xml:"SetCollectorPageSizeResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14043,9 +15183,49 @@ func SetCollectorPageSize(ctx context.Context, r soap.RoundTripper, req *types.S
 	return resBody.Res, nil
 }
 
+type SetCryptoModeBody struct {
+	Req *types.SetCryptoMode `xml:"urn:vim25 SetCryptoMode,omitempty"`
+	Res *types.SetCryptoModeResponse `xml:"SetCryptoModeResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetCryptoModeBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetCryptoMode(ctx context.Context, r soap.RoundTripper, req *types.SetCryptoMode) (*types.SetCryptoModeResponse, error) {
+	var reqBody, resBody SetCryptoModeBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
+type SetDefaultKmsClusterBody struct {
+	Req *types.SetDefaultKmsCluster `xml:"urn:vim25 SetDefaultKmsCluster,omitempty"`
+	Res *types.SetDefaultKmsClusterResponse `xml:"SetDefaultKmsClusterResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetDefaultKmsClusterBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetDefaultKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.SetDefaultKmsCluster) (*types.SetDefaultKmsClusterResponse, error) {
+	var reqBody, resBody SetDefaultKmsClusterBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type SetDisplayTopologyBody struct {
 	Req *types.SetDisplayTopology `xml:"urn:vim25 SetDisplayTopology,omitempty"`
-	Res *types.SetDisplayTopologyResponse `xml:"urn:vim25 SetDisplayTopologyResponse,omitempty"`
+	Res *types.SetDisplayTopologyResponse `xml:"SetDisplayTopologyResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14065,7 +15245,7 @@ func SetDisplayTopology(ctx context.Context, r soap.RoundTripper, req *types.Set
 
 type SetEntityPermissionsBody struct {
 	Req *types.SetEntityPermissions `xml:"urn:vim25 SetEntityPermissions,omitempty"`
-	Res *types.SetEntityPermissionsResponse `xml:"urn:vim25 SetEntityPermissionsResponse,omitempty"`
+	Res *types.SetEntityPermissionsResponse `xml:"SetEntityPermissionsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14085,7 +15265,7 @@ func SetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.S
 
 type SetExtensionCertificateBody struct {
 	Req *types.SetExtensionCertificate `xml:"urn:vim25 SetExtensionCertificate,omitempty"`
-	Res *types.SetExtensionCertificateResponse `xml:"urn:vim25 SetExtensionCertificateResponse,omitempty"`
+	Res *types.SetExtensionCertificateResponse `xml:"SetExtensionCertificateResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14105,7 +15285,7 @@ func SetExtensionCertificate(ctx context.Context, r soap.RoundTripper, req *type
 
 type SetFieldBody struct {
 	Req *types.SetField `xml:"urn:vim25 SetField,omitempty"`
-	Res *types.SetFieldResponse `xml:"urn:vim25 SetFieldResponse,omitempty"`
+	Res *types.SetFieldResponse `xml:"SetFieldResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14125,7 +15305,7 @@ func SetField(ctx context.Context, r soap.RoundTripper, req *types.SetField) (*t
 
 type SetLicenseEditionBody struct {
 	Req *types.SetLicenseEdition `xml:"urn:vim25 SetLicenseEdition,omitempty"`
-	Res *types.SetLicenseEditionResponse `xml:"urn:vim25 SetLicenseEditionResponse,omitempty"`
+	Res *types.SetLicenseEditionResponse `xml:"SetLicenseEditionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14145,7 +15325,7 @@ func SetLicenseEdition(ctx context.Context, r soap.RoundTripper, req *types.SetL
 
 type SetLocaleBody struct {
 	Req *types.SetLocale `xml:"urn:vim25 SetLocale,omitempty"`
-	Res *types.SetLocaleResponse `xml:"urn:vim25 SetLocaleResponse,omitempty"`
+	Res *types.SetLocaleResponse `xml:"SetLocaleResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14163,9 +15343,29 @@ func SetLocale(ctx context.Context, r soap.RoundTripper, req *types.SetLocale) (
 	return resBody.Res, nil
 }
 
+type SetMaxQueueDepthBody struct {
+	Req *types.SetMaxQueueDepth `xml:"urn:vim25 SetMaxQueueDepth,omitempty"`
+	Res *types.SetMaxQueueDepthResponse `xml:"SetMaxQueueDepthResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *SetMaxQueueDepthBody) Fault() *soap.Fault { return b.Fault_ }
+
+func SetMaxQueueDepth(ctx context.Context, r soap.RoundTripper, req *types.SetMaxQueueDepth) (*types.SetMaxQueueDepthResponse, error) {
+	var reqBody, resBody SetMaxQueueDepthBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type SetMultipathLunPolicyBody struct {
 	Req *types.SetMultipathLunPolicy `xml:"urn:vim25 SetMultipathLunPolicy,omitempty"`
-	Res *types.SetMultipathLunPolicyResponse `xml:"urn:vim25 SetMultipathLunPolicyResponse,omitempty"`
+	Res *types.SetMultipathLunPolicyResponse `xml:"SetMultipathLunPolicyResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14185,7 +15385,7 @@ func SetMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.
 
 type SetNFSUserBody struct {
 	Req *types.SetNFSUser `xml:"urn:vim25 SetNFSUser,omitempty"`
-	Res *types.SetNFSUserResponse `xml:"urn:vim25 SetNFSUserResponse,omitempty"`
+	Res *types.SetNFSUserResponse `xml:"SetNFSUserResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14205,7 +15405,7 @@ func SetNFSUser(ctx context.Context, r soap.RoundTripper, req *types.SetNFSUser)
 
 type SetPublicKeyBody struct {
 	Req *types.SetPublicKey `xml:"urn:vim25 SetPublicKey,omitempty"`
-	Res *types.SetPublicKeyResponse `xml:"urn:vim25 SetPublicKeyResponse,omitempty"`
+	Res *types.SetPublicKeyResponse `xml:"SetPublicKeyResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14225,7 +15425,7 @@ func SetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.SetPublic
 
 type SetRegistryValueInGuestBody struct {
 	Req *types.SetRegistryValueInGuest `xml:"urn:vim25 SetRegistryValueInGuest,omitempty"`
-	Res *types.SetRegistryValueInGuestResponse `xml:"urn:vim25 SetRegistryValueInGuestResponse,omitempty"`
+	Res *types.SetRegistryValueInGuestResponse `xml:"SetRegistryValueInGuestResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14245,7 +15445,7 @@ func SetRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *type
 
 type SetScreenResolutionBody struct {
 	Req *types.SetScreenResolution `xml:"urn:vim25 SetScreenResolution,omitempty"`
-	Res *types.SetScreenResolutionResponse `xml:"urn:vim25 SetScreenResolutionResponse,omitempty"`
+	Res *types.SetScreenResolutionResponse `xml:"SetScreenResolutionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14265,7 +15465,7 @@ func SetScreenResolution(ctx context.Context, r soap.RoundTripper, req *types.Se
 
 type SetTaskDescriptionBody struct {
 	Req *types.SetTaskDescription `xml:"urn:vim25 SetTaskDescription,omitempty"`
-	Res *types.SetTaskDescriptionResponse `xml:"urn:vim25 SetTaskDescriptionResponse,omitempty"`
+	Res *types.SetTaskDescriptionResponse `xml:"SetTaskDescriptionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14285,7 +15485,7 @@ func SetTaskDescription(ctx context.Context, r soap.RoundTripper, req *types.Set
 
 type SetTaskStateBody struct {
 	Req *types.SetTaskState `xml:"urn:vim25 SetTaskState,omitempty"`
-	Res *types.SetTaskStateResponse `xml:"urn:vim25 SetTaskStateResponse,omitempty"`
+	Res *types.SetTaskStateResponse `xml:"SetTaskStateResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14305,7 +15505,7 @@ func SetTaskState(ctx context.Context, r soap.RoundTripper, req *types.SetTaskSt
 
 type SetVStorageObjectControlFlagsBody struct {
 	Req *types.SetVStorageObjectControlFlags `xml:"urn:vim25 SetVStorageObjectControlFlags,omitempty"`
-	Res *types.SetVStorageObjectControlFlagsResponse `xml:"urn:vim25 SetVStorageObjectControlFlagsResponse,omitempty"`
+	Res *types.SetVStorageObjectControlFlagsResponse `xml:"SetVStorageObjectControlFlagsResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14325,7 +15525,7 @@ func SetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req
 
 type SetVirtualDiskUuidBody struct {
 	Req *types.SetVirtualDiskUuid `xml:"urn:vim25 SetVirtualDiskUuid,omitempty"`
-	Res *types.SetVirtualDiskUuidResponse `xml:"urn:vim25 SetVirtualDiskUuidResponse,omitempty"`
+	Res *types.SetVirtualDiskUuidResponse `xml:"SetVirtualDiskUuidResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14345,7 +15545,7 @@ func SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Set
 
 type ShrinkVirtualDisk_TaskBody struct {
 	Req *types.ShrinkVirtualDisk_Task `xml:"urn:vim25 ShrinkVirtualDisk_Task,omitempty"`
-	Res *types.ShrinkVirtualDisk_TaskResponse `xml:"urn:vim25 ShrinkVirtualDisk_TaskResponse,omitempty"`
+	Res *types.ShrinkVirtualDisk_TaskResponse `xml:"ShrinkVirtualDisk_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14365,7 +15565,7 @@ func ShrinkVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types
 
 type ShutdownGuestBody struct {
 	Req *types.ShutdownGuest `xml:"urn:vim25 ShutdownGuest,omitempty"`
-	Res *types.ShutdownGuestResponse `xml:"urn:vim25 ShutdownGuestResponse,omitempty"`
+	Res *types.ShutdownGuestResponse `xml:"ShutdownGuestResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14385,7 +15585,7 @@ func ShutdownGuest(ctx context.Context, r soap.RoundTripper, req *types.Shutdown
 
 type ShutdownHost_TaskBody struct {
 	Req *types.ShutdownHost_Task `xml:"urn:vim25 ShutdownHost_Task,omitempty"`
-	Res *types.ShutdownHost_TaskResponse `xml:"urn:vim25 ShutdownHost_TaskResponse,omitempty"`
+	Res *types.ShutdownHost_TaskResponse `xml:"ShutdownHost_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14405,7 +15605,7 @@ func ShutdownHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Shut
 
 type StageHostPatch_TaskBody struct {
 	Req *types.StageHostPatch_Task `xml:"urn:vim25 StageHostPatch_Task,omitempty"`
-	Res *types.StageHostPatch_TaskResponse `xml:"urn:vim25 StageHostPatch_TaskResponse,omitempty"`
+	Res *types.StageHostPatch_TaskResponse `xml:"StageHostPatch_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14425,7 +15625,7 @@ func StageHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.St
 
 type StampAllRulesWithUuid_TaskBody struct {
 	Req *types.StampAllRulesWithUuid_Task `xml:"urn:vim25 StampAllRulesWithUuid_Task,omitempty"`
-	Res *types.StampAllRulesWithUuid_TaskResponse `xml:"urn:vim25 StampAllRulesWithUuid_TaskResponse,omitempty"`
+	Res *types.StampAllRulesWithUuid_TaskResponse `xml:"StampAllRulesWithUuid_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14445,7 +15645,7 @@ func StampAllRulesWithUuid_Task(ctx context.Context, r soap.RoundTripper, req *t
 
 type StandbyGuestBody struct {
 	Req *types.StandbyGuest `xml:"urn:vim25 StandbyGuest,omitempty"`
-	Res *types.StandbyGuestResponse `xml:"urn:vim25 StandbyGuestResponse,omitempty"`
+	Res *types.StandbyGuestResponse `xml:"StandbyGuestResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14463,9 +15663,29 @@ func StandbyGuest(ctx context.Context, r soap.RoundTripper, req *types.StandbyGu
 	return resBody.Res, nil
 }
 
+type StartGuestNetwork_TaskBody struct {
+	Req *types.StartGuestNetwork_Task `xml:"urn:vim25 StartGuestNetwork_Task,omitempty"`
+	Res *types.StartGuestNetwork_TaskResponse `xml:"StartGuestNetwork_TaskResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartGuestNetwork_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StartGuestNetwork_Task(ctx context.Context, r soap.RoundTripper, req *types.StartGuestNetwork_Task) (*types.StartGuestNetwork_TaskResponse, error) { + var reqBody, resBody StartGuestNetwork_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type StartProgramInGuestBody struct { Req *types.StartProgramInGuest `xml:"urn:vim25 StartProgramInGuest,omitempty"` - Res *types.StartProgramInGuestResponse `xml:"urn:vim25 StartProgramInGuestResponse,omitempty"` + Res *types.StartProgramInGuestResponse `xml:"StartProgramInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14485,7 +15705,7 @@ func StartProgramInGuest(ctx context.Context, r soap.RoundTripper, req *types.St type StartRecording_TaskBody struct { Req *types.StartRecording_Task `xml:"urn:vim25 StartRecording_Task,omitempty"` - Res *types.StartRecording_TaskResponse `xml:"urn:vim25 StartRecording_TaskResponse,omitempty"` + Res *types.StartRecording_TaskResponse `xml:"StartRecording_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14505,7 +15725,7 @@ func StartRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.St type StartReplaying_TaskBody struct { Req *types.StartReplaying_Task `xml:"urn:vim25 StartReplaying_Task,omitempty"` - Res *types.StartReplaying_TaskResponse `xml:"urn:vim25 StartReplaying_TaskResponse,omitempty"` + Res *types.StartReplaying_TaskResponse `xml:"StartReplaying_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14525,7 +15745,7 @@ func StartReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.St type StartServiceBody struct { Req *types.StartService `xml:"urn:vim25 StartService,omitempty"` - Res *types.StartServiceResponse `xml:"urn:vim25 StartServiceResponse,omitempty"` + Res *types.StartServiceResponse `xml:"StartServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14545,7 +15765,7 @@ func StartService(ctx context.Context, r soap.RoundTripper, req *types.StartServ type StopRecording_TaskBody struct { Req *types.StopRecording_Task `xml:"urn:vim25 StopRecording_Task,omitempty"` - Res *types.StopRecording_TaskResponse `xml:"urn:vim25 StopRecording_TaskResponse,omitempty"` + Res *types.StopRecording_TaskResponse `xml:"StopRecording_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14565,7 +15785,7 @@ func StopRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.Sto type StopReplaying_TaskBody struct { Req *types.StopReplaying_Task `xml:"urn:vim25 StopReplaying_Task,omitempty"` - Res *types.StopReplaying_TaskResponse `xml:"urn:vim25 StopReplaying_TaskResponse,omitempty"` + Res *types.StopReplaying_TaskResponse `xml:"StopReplaying_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14585,7 +15805,7 @@ func StopReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.Sto type StopServiceBody struct { Req *types.StopService `xml:"urn:vim25 StopService,omitempty"` - Res *types.StopServiceResponse `xml:"urn:vim25 
+	Res *types.StopServiceResponse `xml:"StopServiceResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14605,7 +15825,7 @@ func StopService(ctx context.Context, r soap.RoundTripper, req *types.StopServic
 
 type SuspendVApp_TaskBody struct {
 	Req *types.SuspendVApp_Task `xml:"urn:vim25 SuspendVApp_Task,omitempty"`
-	Res *types.SuspendVApp_TaskResponse `xml:"urn:vim25 SuspendVApp_TaskResponse,omitempty"`
+	Res *types.SuspendVApp_TaskResponse `xml:"SuspendVApp_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14625,7 +15845,7 @@ func SuspendVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Suspe
 
 type SuspendVM_TaskBody struct {
 	Req *types.SuspendVM_Task `xml:"urn:vim25 SuspendVM_Task,omitempty"`
-	Res *types.SuspendVM_TaskResponse `xml:"urn:vim25 SuspendVM_TaskResponse,omitempty"`
+	Res *types.SuspendVM_TaskResponse `xml:"SuspendVM_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14645,7 +15865,7 @@ func SuspendVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Suspend
 
 type TerminateFaultTolerantVM_TaskBody struct {
 	Req *types.TerminateFaultTolerantVM_Task `xml:"urn:vim25 TerminateFaultTolerantVM_Task,omitempty"`
-	Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"urn:vim25 TerminateFaultTolerantVM_TaskResponse,omitempty"`
+	Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"TerminateFaultTolerantVM_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14665,7 +15885,7 @@ func TerminateFaultTolerantVM_Task(ctx context.Context, r soap.RoundTripper, req
 
 type TerminateProcessInGuestBody struct {
 	Req *types.TerminateProcessInGuest `xml:"urn:vim25 TerminateProcessInGuest,omitempty"`
-	Res *types.TerminateProcessInGuestResponse `xml:"urn:vim25 TerminateProcessInGuestResponse,omitempty"`
+	Res *types.TerminateProcessInGuestResponse `xml:"TerminateProcessInGuestResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14685,7 +15905,7 @@ func TerminateProcessInGuest(ctx context.Context, r soap.RoundTripper, req *type
 
 type TerminateSessionBody struct {
 	Req *types.TerminateSession `xml:"urn:vim25 TerminateSession,omitempty"`
-	Res *types.TerminateSessionResponse `xml:"urn:vim25 TerminateSessionResponse,omitempty"`
+	Res *types.TerminateSessionResponse `xml:"TerminateSessionResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14705,7 +15925,7 @@ func TerminateSession(ctx context.Context, r soap.RoundTripper, req *types.Termi
 
 type TerminateVMBody struct {
 	Req *types.TerminateVM `xml:"urn:vim25 TerminateVM,omitempty"`
-	Res *types.TerminateVMResponse `xml:"urn:vim25 TerminateVMResponse,omitempty"`
+	Res *types.TerminateVMResponse `xml:"TerminateVMResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14723,9 +15943,29 @@ func TerminateVM(ctx context.Context, r soap.RoundTripper, req *types.TerminateV
 	return resBody.Res, nil
 }
 
+type TestTimeServiceBody struct {
+	Req *types.TestTimeService `xml:"urn:vim25 TestTimeService,omitempty"`
+	Res *types.TestTimeServiceResponse `xml:"TestTimeServiceResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *TestTimeServiceBody) Fault() *soap.Fault { return b.Fault_ }
+
+func TestTimeService(ctx context.Context, r soap.RoundTripper, req *types.TestTimeService) (*types.TestTimeServiceResponse, error) {
+	var reqBody, resBody TestTimeServiceBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type TurnDiskLocatorLedOff_TaskBody struct {
 	Req *types.TurnDiskLocatorLedOff_Task `xml:"urn:vim25 TurnDiskLocatorLedOff_Task,omitempty"`
-	Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOff_TaskResponse,omitempty"`
+	Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"TurnDiskLocatorLedOff_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14745,7 +15985,7 @@ func TurnDiskLocatorLedOff_Task(ctx context.Context, r soap.RoundTripper, req *t
 
 type TurnDiskLocatorLedOn_TaskBody struct {
 	Req *types.TurnDiskLocatorLedOn_Task `xml:"urn:vim25 TurnDiskLocatorLedOn_Task,omitempty"`
-	Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOn_TaskResponse,omitempty"`
+	Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"TurnDiskLocatorLedOn_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14765,7 +16005,7 @@ func TurnDiskLocatorLedOn_Task(ctx context.Context, r soap.RoundTripper, req *ty
 
 type TurnOffFaultToleranceForVM_TaskBody struct {
 	Req *types.TurnOffFaultToleranceForVM_Task `xml:"urn:vim25 TurnOffFaultToleranceForVM_Task,omitempty"`
-	Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"urn:vim25 TurnOffFaultToleranceForVM_TaskResponse,omitempty"`
+	Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"TurnOffFaultToleranceForVM_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14785,7 +16025,7 @@ func TurnOffFaultToleranceForVM_Task(ctx context.Context, r soap.RoundTripper, r
 
 type UnassignUserFromGroupBody struct {
 	Req *types.UnassignUserFromGroup `xml:"urn:vim25 UnassignUserFromGroup,omitempty"`
-	Res *types.UnassignUserFromGroupResponse `xml:"urn:vim25 UnassignUserFromGroupResponse,omitempty"`
+	Res *types.UnassignUserFromGroupResponse `xml:"UnassignUserFromGroupResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14805,7 +16045,7 @@ func UnassignUserFromGroup(ctx context.Context, r soap.RoundTripper, req *types.
 
 type UnbindVnicBody struct {
 	Req *types.UnbindVnic `xml:"urn:vim25 UnbindVnic,omitempty"`
-	Res *types.UnbindVnicResponse `xml:"urn:vim25 UnbindVnicResponse,omitempty"`
+	Res *types.UnbindVnicResponse `xml:"UnbindVnicResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14825,7 +16065,7 @@ func UnbindVnic(ctx context.Context, r soap.RoundTripper, req *types.UnbindVnic)
 
 type UninstallHostPatch_TaskBody struct {
 	Req *types.UninstallHostPatch_Task `xml:"urn:vim25 UninstallHostPatch_Task,omitempty"`
-	Res *types.UninstallHostPatch_TaskResponse `xml:"urn:vim25 UninstallHostPatch_TaskResponse,omitempty"`
+	Res *types.UninstallHostPatch_TaskResponse `xml:"UninstallHostPatch_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14845,7 +16085,7 @@ func UninstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *type
 
 type UninstallIoFilter_TaskBody struct {
 	Req *types.UninstallIoFilter_Task `xml:"urn:vim25 UninstallIoFilter_Task,omitempty"`
-	Res *types.UninstallIoFilter_TaskResponse `xml:"urn:vim25 UninstallIoFilter_TaskResponse,omitempty"`
+	Res *types.UninstallIoFilter_TaskResponse `xml:"UninstallIoFilter_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14865,7 +16105,7 @@ func UninstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types
 
 type UninstallServiceBody struct {
 	Req *types.UninstallService `xml:"urn:vim25 UninstallService,omitempty"`
-	Res *types.UninstallServiceResponse `xml:"urn:vim25 UninstallServiceResponse,omitempty"`
+	Res *types.UninstallServiceResponse `xml:"UninstallServiceResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14885,7 +16125,7 @@ func UninstallService(ctx context.Context, r soap.RoundTripper, req *types.Unins
 
 type UnmapVmfsVolumeEx_TaskBody struct {
 	Req *types.UnmapVmfsVolumeEx_Task `xml:"urn:vim25 UnmapVmfsVolumeEx_Task,omitempty"`
-	Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmapVmfsVolumeEx_TaskResponse,omitempty"`
+	Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"UnmapVmfsVolumeEx_TaskResponse,omitempty"`
 	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
 }
 
@@ -14903,9 +16143,29 @@ func UnmapVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types
 	return resBody.Res, nil
 }
 
+type UnmarkServiceProviderEntitiesBody struct {
+	Req *types.UnmarkServiceProviderEntities `xml:"urn:vim25 UnmarkServiceProviderEntities,omitempty"`
+	Res *types.UnmarkServiceProviderEntitiesResponse `xml:"UnmarkServiceProviderEntitiesResponse,omitempty"`
+	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
+}
+
+func (b *UnmarkServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ }
+
+func UnmarkServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.UnmarkServiceProviderEntities) (*types.UnmarkServiceProviderEntitiesResponse, error) {
+	var reqBody, resBody UnmarkServiceProviderEntitiesBody
+
+	reqBody.Req = req
+
+	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
+		return nil, err
+	}
+
+	return resBody.Res, nil
+}
+
 type UnmountDiskMapping_TaskBody struct {
 	Req *types.UnmountDiskMapping_Task `xml:"urn:vim25 UnmountDiskMapping_Task,omitempty"`
-	Res *types.UnmountDiskMapping_TaskResponse `xml:"urn:vim25 UnmountDiskMapping_TaskResponse,omitempty"`
*types.UnmountDiskMapping_TaskResponse `xml:"UnmountDiskMapping_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14925,7 +16185,7 @@ func UnmountDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *type type UnmountForceMountedVmfsVolumeBody struct { Req *types.UnmountForceMountedVmfsVolume `xml:"urn:vim25 UnmountForceMountedVmfsVolume,omitempty"` - Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"urn:vim25 UnmountForceMountedVmfsVolumeResponse,omitempty"` + Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"UnmountForceMountedVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14945,7 +16205,7 @@ func UnmountForceMountedVmfsVolume(ctx context.Context, r soap.RoundTripper, req type UnmountToolsInstallerBody struct { Req *types.UnmountToolsInstaller `xml:"urn:vim25 UnmountToolsInstaller,omitempty"` - Res *types.UnmountToolsInstallerResponse `xml:"urn:vim25 UnmountToolsInstallerResponse,omitempty"` + Res *types.UnmountToolsInstallerResponse `xml:"UnmountToolsInstallerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14965,7 +16225,7 @@ func UnmountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types. type UnmountVffsVolumeBody struct { Req *types.UnmountVffsVolume `xml:"urn:vim25 UnmountVffsVolume,omitempty"` - Res *types.UnmountVffsVolumeResponse `xml:"urn:vim25 UnmountVffsVolumeResponse,omitempty"` + Res *types.UnmountVffsVolumeResponse `xml:"UnmountVffsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14985,7 +16245,7 @@ func UnmountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.Unmo type UnmountVmfsVolumeBody struct { Req *types.UnmountVmfsVolume `xml:"urn:vim25 UnmountVmfsVolume,omitempty"` - Res *types.UnmountVmfsVolumeResponse `xml:"urn:vim25 UnmountVmfsVolumeResponse,omitempty"` + Res *types.UnmountVmfsVolumeResponse `xml:"UnmountVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15005,7 +16265,7 @@ func UnmountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.Unmo type UnmountVmfsVolumeEx_TaskBody struct { Req *types.UnmountVmfsVolumeEx_Task `xml:"urn:vim25 UnmountVmfsVolumeEx_Task,omitempty"` - Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmountVmfsVolumeEx_TaskResponse,omitempty"` + Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"UnmountVmfsVolumeEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15025,7 +16285,7 @@ func UnmountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *typ type UnregisterAndDestroy_TaskBody struct { Req *types.UnregisterAndDestroy_Task `xml:"urn:vim25 UnregisterAndDestroy_Task,omitempty"` - Res *types.UnregisterAndDestroy_TaskResponse `xml:"urn:vim25 UnregisterAndDestroy_TaskResponse,omitempty"` + Res *types.UnregisterAndDestroy_TaskResponse `xml:"UnregisterAndDestroy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15045,7 +16305,7 @@ func UnregisterAndDestroy_Task(ctx context.Context, r soap.RoundTripper, req *ty type UnregisterExtensionBody struct { Req *types.UnregisterExtension `xml:"urn:vim25 UnregisterExtension,omitempty"` - Res *types.UnregisterExtensionResponse `xml:"urn:vim25 
UnregisterExtensionResponse,omitempty"` + Res *types.UnregisterExtensionResponse `xml:"UnregisterExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15065,7 +16325,7 @@ func UnregisterExtension(ctx context.Context, r soap.RoundTripper, req *types.Un type UnregisterHealthUpdateProviderBody struct { Req *types.UnregisterHealthUpdateProvider `xml:"urn:vim25 UnregisterHealthUpdateProvider,omitempty"` - Res *types.UnregisterHealthUpdateProviderResponse `xml:"urn:vim25 UnregisterHealthUpdateProviderResponse,omitempty"` + Res *types.UnregisterHealthUpdateProviderResponse `xml:"UnregisterHealthUpdateProviderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15083,9 +16343,29 @@ func UnregisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, re return resBody.Res, nil } +type UnregisterKmsClusterBody struct { + Req *types.UnregisterKmsCluster `xml:"urn:vim25 UnregisterKmsCluster,omitempty"` + Res *types.UnregisterKmsClusterResponse `xml:"UnregisterKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.UnregisterKmsCluster) (*types.UnregisterKmsClusterResponse, error) { + var reqBody, resBody UnregisterKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UnregisterVMBody struct { Req *types.UnregisterVM `xml:"urn:vim25 UnregisterVM,omitempty"` - Res *types.UnregisterVMResponse `xml:"urn:vim25 UnregisterVMResponse,omitempty"` + Res *types.UnregisterVMResponse `xml:"UnregisterVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15105,7 +16385,7 @@ func UnregisterVM(ctx context.Context, r soap.RoundTripper, req *types.Unregiste type UpdateAnswerFile_TaskBody struct { Req *types.UpdateAnswerFile_Task `xml:"urn:vim25 UpdateAnswerFile_Task,omitempty"` - Res *types.UpdateAnswerFile_TaskResponse `xml:"urn:vim25 UpdateAnswerFile_TaskResponse,omitempty"` + Res *types.UpdateAnswerFile_TaskResponse `xml:"UpdateAnswerFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15123,9 +16403,29 @@ func UpdateAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types. 
return resBody.Res, nil } +type UpdateAssignableHardwareConfigBody struct { + Req *types.UpdateAssignableHardwareConfig `xml:"urn:vim25 UpdateAssignableHardwareConfig,omitempty"` + Res *types.UpdateAssignableHardwareConfigResponse `xml:"UpdateAssignableHardwareConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAssignableHardwareConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAssignableHardwareConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateAssignableHardwareConfig) (*types.UpdateAssignableHardwareConfigResponse, error) { + var reqBody, resBody UpdateAssignableHardwareConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateAssignedLicenseBody struct { Req *types.UpdateAssignedLicense `xml:"urn:vim25 UpdateAssignedLicense,omitempty"` - Res *types.UpdateAssignedLicenseResponse `xml:"urn:vim25 UpdateAssignedLicenseResponse,omitempty"` + Res *types.UpdateAssignedLicenseResponse `xml:"UpdateAssignedLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15145,7 +16445,7 @@ func UpdateAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types. type UpdateAuthorizationRoleBody struct { Req *types.UpdateAuthorizationRole `xml:"urn:vim25 UpdateAuthorizationRole,omitempty"` - Res *types.UpdateAuthorizationRoleResponse `xml:"urn:vim25 UpdateAuthorizationRoleResponse,omitempty"` + Res *types.UpdateAuthorizationRoleResponse `xml:"UpdateAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15165,7 +16465,7 @@ func UpdateAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *type type UpdateBootDeviceBody struct { Req *types.UpdateBootDevice `xml:"urn:vim25 UpdateBootDevice,omitempty"` - Res *types.UpdateBootDeviceResponse `xml:"urn:vim25 UpdateBootDeviceResponse,omitempty"` + Res *types.UpdateBootDeviceResponse `xml:"UpdateBootDeviceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15185,7 +16485,7 @@ func UpdateBootDevice(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateChildResourceConfigurationBody struct { Req *types.UpdateChildResourceConfiguration `xml:"urn:vim25 UpdateChildResourceConfiguration,omitempty"` - Res *types.UpdateChildResourceConfigurationResponse `xml:"urn:vim25 UpdateChildResourceConfigurationResponse,omitempty"` + Res *types.UpdateChildResourceConfigurationResponse `xml:"UpdateChildResourceConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15205,7 +16505,7 @@ func UpdateChildResourceConfiguration(ctx context.Context, r soap.RoundTripper, type UpdateClusterProfileBody struct { Req *types.UpdateClusterProfile `xml:"urn:vim25 UpdateClusterProfile,omitempty"` - Res *types.UpdateClusterProfileResponse `xml:"urn:vim25 UpdateClusterProfileResponse,omitempty"` + Res *types.UpdateClusterProfileResponse `xml:"UpdateClusterProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15225,7 +16525,7 @@ func UpdateClusterProfile(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateConfigBody struct { Req *types.UpdateConfig `xml:"urn:vim25 UpdateConfig,omitempty"` - Res 
*types.UpdateConfigResponse `xml:"urn:vim25 UpdateConfigResponse,omitempty"` + Res *types.UpdateConfigResponse `xml:"UpdateConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15245,7 +16545,7 @@ func UpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateCon type UpdateConsoleIpRouteConfigBody struct { Req *types.UpdateConsoleIpRouteConfig `xml:"urn:vim25 UpdateConsoleIpRouteConfig,omitempty"` - Res *types.UpdateConsoleIpRouteConfigResponse `xml:"urn:vim25 UpdateConsoleIpRouteConfigResponse,omitempty"` + Res *types.UpdateConsoleIpRouteConfigResponse `xml:"UpdateConsoleIpRouteConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15265,7 +16565,7 @@ func UpdateConsoleIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *t type UpdateCounterLevelMappingBody struct { Req *types.UpdateCounterLevelMapping `xml:"urn:vim25 UpdateCounterLevelMapping,omitempty"` - Res *types.UpdateCounterLevelMappingResponse `xml:"urn:vim25 UpdateCounterLevelMappingResponse,omitempty"` + Res *types.UpdateCounterLevelMappingResponse `xml:"UpdateCounterLevelMappingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15285,7 +16585,7 @@ func UpdateCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *ty type UpdateDVSHealthCheckConfig_TaskBody struct { Req *types.UpdateDVSHealthCheckConfig_Task `xml:"urn:vim25 UpdateDVSHealthCheckConfig_Task,omitempty"` - Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"urn:vim25 UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` + Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15305,7 +16605,7 @@ func UpdateDVSHealthCheckConfig_Task(ctx context.Context, r soap.RoundTripper, r type UpdateDVSLacpGroupConfig_TaskBody struct { Req *types.UpdateDVSLacpGroupConfig_Task `xml:"urn:vim25 UpdateDVSLacpGroupConfig_Task,omitempty"` - Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"urn:vim25 UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` + Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15325,7 +16625,7 @@ func UpdateDVSLacpGroupConfig_Task(ctx context.Context, r soap.RoundTripper, req type UpdateDateTimeBody struct { Req *types.UpdateDateTime `xml:"urn:vim25 UpdateDateTime,omitempty"` - Res *types.UpdateDateTimeResponse `xml:"urn:vim25 UpdateDateTimeResponse,omitempty"` + Res *types.UpdateDateTimeResponse `xml:"UpdateDateTimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15345,7 +16645,7 @@ func UpdateDateTime(ctx context.Context, r soap.RoundTripper, req *types.UpdateD type UpdateDateTimeConfigBody struct { Req *types.UpdateDateTimeConfig `xml:"urn:vim25 UpdateDateTimeConfig,omitempty"` - Res *types.UpdateDateTimeConfigResponse `xml:"urn:vim25 UpdateDateTimeConfigResponse,omitempty"` + Res *types.UpdateDateTimeConfigResponse `xml:"UpdateDateTimeConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15365,7 +16665,7 @@ func UpdateDateTimeConfig(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateDefaultPolicyBody struct { 
Req *types.UpdateDefaultPolicy `xml:"urn:vim25 UpdateDefaultPolicy,omitempty"` - Res *types.UpdateDefaultPolicyResponse `xml:"urn:vim25 UpdateDefaultPolicyResponse,omitempty"` + Res *types.UpdateDefaultPolicyResponse `xml:"UpdateDefaultPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15385,7 +16685,7 @@ func UpdateDefaultPolicy(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateDiskPartitionsBody struct { Req *types.UpdateDiskPartitions `xml:"urn:vim25 UpdateDiskPartitions,omitempty"` - Res *types.UpdateDiskPartitionsResponse `xml:"urn:vim25 UpdateDiskPartitionsResponse,omitempty"` + Res *types.UpdateDiskPartitionsResponse `xml:"UpdateDiskPartitionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15405,7 +16705,7 @@ func UpdateDiskPartitions(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateDnsConfigBody struct { Req *types.UpdateDnsConfig `xml:"urn:vim25 UpdateDnsConfig,omitempty"` - Res *types.UpdateDnsConfigResponse `xml:"urn:vim25 UpdateDnsConfigResponse,omitempty"` + Res *types.UpdateDnsConfigResponse `xml:"UpdateDnsConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15425,7 +16725,7 @@ func UpdateDnsConfig(ctx context.Context, r soap.RoundTripper, req *types.Update type UpdateDvsCapabilityBody struct { Req *types.UpdateDvsCapability `xml:"urn:vim25 UpdateDvsCapability,omitempty"` - Res *types.UpdateDvsCapabilityResponse `xml:"urn:vim25 UpdateDvsCapabilityResponse,omitempty"` + Res *types.UpdateDvsCapabilityResponse `xml:"UpdateDvsCapabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15445,7 +16745,7 @@ func UpdateDvsCapability(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateExtensionBody struct { Req *types.UpdateExtension `xml:"urn:vim25 UpdateExtension,omitempty"` - Res *types.UpdateExtensionResponse `xml:"urn:vim25 UpdateExtensionResponse,omitempty"` + Res *types.UpdateExtensionResponse `xml:"UpdateExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15465,7 +16765,7 @@ func UpdateExtension(ctx context.Context, r soap.RoundTripper, req *types.Update type UpdateFlagsBody struct { Req *types.UpdateFlags `xml:"urn:vim25 UpdateFlags,omitempty"` - Res *types.UpdateFlagsResponse `xml:"urn:vim25 UpdateFlagsResponse,omitempty"` + Res *types.UpdateFlagsResponse `xml:"UpdateFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15485,7 +16785,7 @@ func UpdateFlags(ctx context.Context, r soap.RoundTripper, req *types.UpdateFlag type UpdateGraphicsConfigBody struct { Req *types.UpdateGraphicsConfig `xml:"urn:vim25 UpdateGraphicsConfig,omitempty"` - Res *types.UpdateGraphicsConfigResponse `xml:"urn:vim25 UpdateGraphicsConfigResponse,omitempty"` + Res *types.UpdateGraphicsConfigResponse `xml:"UpdateGraphicsConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15505,7 +16805,7 @@ func UpdateGraphicsConfig(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateHostCustomizations_TaskBody struct { Req *types.UpdateHostCustomizations_Task `xml:"urn:vim25 UpdateHostCustomizations_Task,omitempty"` - Res *types.UpdateHostCustomizations_TaskResponse `xml:"urn:vim25 
UpdateHostCustomizations_TaskResponse,omitempty"` + Res *types.UpdateHostCustomizations_TaskResponse `xml:"UpdateHostCustomizations_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15525,7 +16825,7 @@ func UpdateHostCustomizations_Task(ctx context.Context, r soap.RoundTripper, req type UpdateHostImageAcceptanceLevelBody struct { Req *types.UpdateHostImageAcceptanceLevel `xml:"urn:vim25 UpdateHostImageAcceptanceLevel,omitempty"` - Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"urn:vim25 UpdateHostImageAcceptanceLevelResponse,omitempty"` + Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"UpdateHostImageAcceptanceLevelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15545,7 +16845,7 @@ func UpdateHostImageAcceptanceLevel(ctx context.Context, r soap.RoundTripper, re type UpdateHostProfileBody struct { Req *types.UpdateHostProfile `xml:"urn:vim25 UpdateHostProfile,omitempty"` - Res *types.UpdateHostProfileResponse `xml:"urn:vim25 UpdateHostProfileResponse,omitempty"` + Res *types.UpdateHostProfileResponse `xml:"UpdateHostProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15565,7 +16865,7 @@ func UpdateHostProfile(ctx context.Context, r soap.RoundTripper, req *types.Upda type UpdateHostSpecificationBody struct { Req *types.UpdateHostSpecification `xml:"urn:vim25 UpdateHostSpecification,omitempty"` - Res *types.UpdateHostSpecificationResponse `xml:"urn:vim25 UpdateHostSpecificationResponse,omitempty"` + Res *types.UpdateHostSpecificationResponse `xml:"UpdateHostSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15585,7 +16885,7 @@ func UpdateHostSpecification(ctx context.Context, r soap.RoundTripper, req *type type UpdateHostSubSpecificationBody struct { Req *types.UpdateHostSubSpecification `xml:"urn:vim25 UpdateHostSubSpecification,omitempty"` - Res *types.UpdateHostSubSpecificationResponse `xml:"urn:vim25 UpdateHostSubSpecificationResponse,omitempty"` + Res *types.UpdateHostSubSpecificationResponse `xml:"UpdateHostSubSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15603,9 +16903,29 @@ func UpdateHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type UpdateHppMultipathLunPolicyBody struct { + Req *types.UpdateHppMultipathLunPolicy `xml:"urn:vim25 UpdateHppMultipathLunPolicy,omitempty"` + Res *types.UpdateHppMultipathLunPolicyResponse `xml:"UpdateHppMultipathLunPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHppMultipathLunPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHppMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateHppMultipathLunPolicy) (*types.UpdateHppMultipathLunPolicyResponse, error) { + var reqBody, resBody UpdateHppMultipathLunPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateInternetScsiAdvancedOptionsBody struct { Req *types.UpdateInternetScsiAdvancedOptions `xml:"urn:vim25 UpdateInternetScsiAdvancedOptions,omitempty"` - Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"urn:vim25 
UpdateInternetScsiAdvancedOptionsResponse,omitempty"` + Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"UpdateInternetScsiAdvancedOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15625,7 +16945,7 @@ func UpdateInternetScsiAdvancedOptions(ctx context.Context, r soap.RoundTripper, type UpdateInternetScsiAliasBody struct { Req *types.UpdateInternetScsiAlias `xml:"urn:vim25 UpdateInternetScsiAlias,omitempty"` - Res *types.UpdateInternetScsiAliasResponse `xml:"urn:vim25 UpdateInternetScsiAliasResponse,omitempty"` + Res *types.UpdateInternetScsiAliasResponse `xml:"UpdateInternetScsiAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15645,7 +16965,7 @@ func UpdateInternetScsiAlias(ctx context.Context, r soap.RoundTripper, req *type type UpdateInternetScsiAuthenticationPropertiesBody struct { Req *types.UpdateInternetScsiAuthenticationProperties `xml:"urn:vim25 UpdateInternetScsiAuthenticationProperties,omitempty"` - Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15665,7 +16985,7 @@ func UpdateInternetScsiAuthenticationProperties(ctx context.Context, r soap.Roun type UpdateInternetScsiDigestPropertiesBody struct { Req *types.UpdateInternetScsiDigestProperties `xml:"urn:vim25 UpdateInternetScsiDigestProperties,omitempty"` - Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDigestPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"UpdateInternetScsiDigestPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15685,7 +17005,7 @@ func UpdateInternetScsiDigestProperties(ctx context.Context, r soap.RoundTripper type UpdateInternetScsiDiscoveryPropertiesBody struct { Req *types.UpdateInternetScsiDiscoveryProperties `xml:"urn:vim25 UpdateInternetScsiDiscoveryProperties,omitempty"` - Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15705,7 +17025,7 @@ func UpdateInternetScsiDiscoveryProperties(ctx context.Context, r soap.RoundTrip type UpdateInternetScsiIPPropertiesBody struct { Req *types.UpdateInternetScsiIPProperties `xml:"urn:vim25 UpdateInternetScsiIPProperties,omitempty"` - Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiIPPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"UpdateInternetScsiIPPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15725,7 +17045,7 @@ func UpdateInternetScsiIPProperties(ctx context.Context, r soap.RoundTripper, re type UpdateInternetScsiNameBody struct { Req *types.UpdateInternetScsiName `xml:"urn:vim25 UpdateInternetScsiName,omitempty"` - Res *types.UpdateInternetScsiNameResponse `xml:"urn:vim25 UpdateInternetScsiNameResponse,omitempty"` + 
Res *types.UpdateInternetScsiNameResponse `xml:"UpdateInternetScsiNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15745,7 +17065,7 @@ func UpdateInternetScsiName(ctx context.Context, r soap.RoundTripper, req *types type UpdateIpConfigBody struct { Req *types.UpdateIpConfig `xml:"urn:vim25 UpdateIpConfig,omitempty"` - Res *types.UpdateIpConfigResponse `xml:"urn:vim25 UpdateIpConfigResponse,omitempty"` + Res *types.UpdateIpConfigResponse `xml:"UpdateIpConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15765,7 +17085,7 @@ func UpdateIpConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateI type UpdateIpPoolBody struct { Req *types.UpdateIpPool `xml:"urn:vim25 UpdateIpPool,omitempty"` - Res *types.UpdateIpPoolResponse `xml:"urn:vim25 UpdateIpPoolResponse,omitempty"` + Res *types.UpdateIpPoolResponse `xml:"UpdateIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15785,7 +17105,7 @@ func UpdateIpPool(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpP type UpdateIpRouteConfigBody struct { Req *types.UpdateIpRouteConfig `xml:"urn:vim25 UpdateIpRouteConfig,omitempty"` - Res *types.UpdateIpRouteConfigResponse `xml:"urn:vim25 UpdateIpRouteConfigResponse,omitempty"` + Res *types.UpdateIpRouteConfigResponse `xml:"UpdateIpRouteConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15805,7 +17125,7 @@ func UpdateIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateIpRouteTableConfigBody struct { Req *types.UpdateIpRouteTableConfig `xml:"urn:vim25 UpdateIpRouteTableConfig,omitempty"` - Res *types.UpdateIpRouteTableConfigResponse `xml:"urn:vim25 UpdateIpRouteTableConfigResponse,omitempty"` + Res *types.UpdateIpRouteTableConfigResponse `xml:"UpdateIpRouteTableConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15825,7 +17145,7 @@ func UpdateIpRouteTableConfig(ctx context.Context, r soap.RoundTripper, req *typ type UpdateIpmiBody struct { Req *types.UpdateIpmi `xml:"urn:vim25 UpdateIpmi,omitempty"` - Res *types.UpdateIpmiResponse `xml:"urn:vim25 UpdateIpmiResponse,omitempty"` + Res *types.UpdateIpmiResponse `xml:"UpdateIpmiResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15845,7 +17165,7 @@ func UpdateIpmi(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpmi) type UpdateKmipServerBody struct { Req *types.UpdateKmipServer `xml:"urn:vim25 UpdateKmipServer,omitempty"` - Res *types.UpdateKmipServerResponse `xml:"urn:vim25 UpdateKmipServerResponse,omitempty"` + Res *types.UpdateKmipServerResponse `xml:"UpdateKmipServerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15865,7 +17185,7 @@ func UpdateKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateKmsSignedCsrClientCertBody struct { Req *types.UpdateKmsSignedCsrClientCert `xml:"urn:vim25 UpdateKmsSignedCsrClientCert,omitempty"` - Res *types.UpdateKmsSignedCsrClientCertResponse `xml:"urn:vim25 UpdateKmsSignedCsrClientCertResponse,omitempty"` + Res *types.UpdateKmsSignedCsrClientCertResponse `xml:"UpdateKmsSignedCsrClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` } @@ -15885,7 +17205,7 @@ func UpdateKmsSignedCsrClientCert(ctx context.Context, r soap.RoundTripper, req type UpdateLicenseBody struct { Req *types.UpdateLicense `xml:"urn:vim25 UpdateLicense,omitempty"` - Res *types.UpdateLicenseResponse `xml:"urn:vim25 UpdateLicenseResponse,omitempty"` + Res *types.UpdateLicenseResponse `xml:"UpdateLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15905,7 +17225,7 @@ func UpdateLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateLi type UpdateLicenseLabelBody struct { Req *types.UpdateLicenseLabel `xml:"urn:vim25 UpdateLicenseLabel,omitempty"` - Res *types.UpdateLicenseLabelResponse `xml:"urn:vim25 UpdateLicenseLabelResponse,omitempty"` + Res *types.UpdateLicenseLabelResponse `xml:"UpdateLicenseLabelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15925,7 +17245,7 @@ func UpdateLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.Upd type UpdateLinkedChildrenBody struct { Req *types.UpdateLinkedChildren `xml:"urn:vim25 UpdateLinkedChildren,omitempty"` - Res *types.UpdateLinkedChildrenResponse `xml:"urn:vim25 UpdateLinkedChildrenResponse,omitempty"` + Res *types.UpdateLinkedChildrenResponse `xml:"UpdateLinkedChildrenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15945,7 +17265,7 @@ func UpdateLinkedChildren(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateLocalSwapDatastoreBody struct { Req *types.UpdateLocalSwapDatastore `xml:"urn:vim25 UpdateLocalSwapDatastore,omitempty"` - Res *types.UpdateLocalSwapDatastoreResponse `xml:"urn:vim25 UpdateLocalSwapDatastoreResponse,omitempty"` + Res *types.UpdateLocalSwapDatastoreResponse `xml:"UpdateLocalSwapDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15965,7 +17285,7 @@ func UpdateLocalSwapDatastore(ctx context.Context, r soap.RoundTripper, req *typ type UpdateLockdownExceptionsBody struct { Req *types.UpdateLockdownExceptions `xml:"urn:vim25 UpdateLockdownExceptions,omitempty"` - Res *types.UpdateLockdownExceptionsResponse `xml:"urn:vim25 UpdateLockdownExceptionsResponse,omitempty"` + Res *types.UpdateLockdownExceptionsResponse `xml:"UpdateLockdownExceptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15985,7 +17305,7 @@ func UpdateLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *typ type UpdateModuleOptionStringBody struct { Req *types.UpdateModuleOptionString `xml:"urn:vim25 UpdateModuleOptionString,omitempty"` - Res *types.UpdateModuleOptionStringResponse `xml:"urn:vim25 UpdateModuleOptionStringResponse,omitempty"` + Res *types.UpdateModuleOptionStringResponse `xml:"UpdateModuleOptionStringResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16005,7 +17325,7 @@ func UpdateModuleOptionString(ctx context.Context, r soap.RoundTripper, req *typ type UpdateNetworkConfigBody struct { Req *types.UpdateNetworkConfig `xml:"urn:vim25 UpdateNetworkConfig,omitempty"` - Res *types.UpdateNetworkConfigResponse `xml:"urn:vim25 UpdateNetworkConfigResponse,omitempty"` + Res *types.UpdateNetworkConfigResponse `xml:"UpdateNetworkConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16025,7 +17345,7 
@@ func UpdateNetworkConfig(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateNetworkResourcePoolBody struct { Req *types.UpdateNetworkResourcePool `xml:"urn:vim25 UpdateNetworkResourcePool,omitempty"` - Res *types.UpdateNetworkResourcePoolResponse `xml:"urn:vim25 UpdateNetworkResourcePoolResponse,omitempty"` + Res *types.UpdateNetworkResourcePoolResponse `xml:"UpdateNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16045,7 +17365,7 @@ func UpdateNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *ty type UpdateOptionsBody struct { Req *types.UpdateOptions `xml:"urn:vim25 UpdateOptions,omitempty"` - Res *types.UpdateOptionsResponse `xml:"urn:vim25 UpdateOptionsResponse,omitempty"` + Res *types.UpdateOptionsResponse `xml:"UpdateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16065,7 +17385,7 @@ func UpdateOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateOp type UpdatePassthruConfigBody struct { Req *types.UpdatePassthruConfig `xml:"urn:vim25 UpdatePassthruConfig,omitempty"` - Res *types.UpdatePassthruConfigResponse `xml:"urn:vim25 UpdatePassthruConfigResponse,omitempty"` + Res *types.UpdatePassthruConfigResponse `xml:"UpdatePassthruConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16085,7 +17405,7 @@ func UpdatePassthruConfig(ctx context.Context, r soap.RoundTripper, req *types.U type UpdatePerfIntervalBody struct { Req *types.UpdatePerfInterval `xml:"urn:vim25 UpdatePerfInterval,omitempty"` - Res *types.UpdatePerfIntervalResponse `xml:"urn:vim25 UpdatePerfIntervalResponse,omitempty"` + Res *types.UpdatePerfIntervalResponse `xml:"UpdatePerfIntervalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16105,7 +17425,7 @@ func UpdatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Upd type UpdatePhysicalNicLinkSpeedBody struct { Req *types.UpdatePhysicalNicLinkSpeed `xml:"urn:vim25 UpdatePhysicalNicLinkSpeed,omitempty"` - Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"urn:vim25 UpdatePhysicalNicLinkSpeedResponse,omitempty"` + Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"UpdatePhysicalNicLinkSpeedResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16125,7 +17445,7 @@ func UpdatePhysicalNicLinkSpeed(ctx context.Context, r soap.RoundTripper, req *t type UpdatePortGroupBody struct { Req *types.UpdatePortGroup `xml:"urn:vim25 UpdatePortGroup,omitempty"` - Res *types.UpdatePortGroupResponse `xml:"urn:vim25 UpdatePortGroupResponse,omitempty"` + Res *types.UpdatePortGroupResponse `xml:"UpdatePortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16143,9 +17463,29 @@ func UpdatePortGroup(ctx context.Context, r soap.RoundTripper, req *types.Update return resBody.Res, nil } +type UpdateProductLockerLocation_TaskBody struct { + Req *types.UpdateProductLockerLocation_Task `xml:"urn:vim25 UpdateProductLockerLocation_Task,omitempty"` + Res *types.UpdateProductLockerLocation_TaskResponse `xml:"UpdateProductLockerLocation_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateProductLockerLocation_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
UpdateProductLockerLocation_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateProductLockerLocation_Task) (*types.UpdateProductLockerLocation_TaskResponse, error) { + var reqBody, resBody UpdateProductLockerLocation_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateProgressBody struct { Req *types.UpdateProgress `xml:"urn:vim25 UpdateProgress,omitempty"` - Res *types.UpdateProgressResponse `xml:"urn:vim25 UpdateProgressResponse,omitempty"` + Res *types.UpdateProgressResponse `xml:"UpdateProgressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16165,7 +17505,7 @@ func UpdateProgress(ctx context.Context, r soap.RoundTripper, req *types.UpdateP type UpdateReferenceHostBody struct { Req *types.UpdateReferenceHost `xml:"urn:vim25 UpdateReferenceHost,omitempty"` - Res *types.UpdateReferenceHostResponse `xml:"urn:vim25 UpdateReferenceHostResponse,omitempty"` + Res *types.UpdateReferenceHostResponse `xml:"UpdateReferenceHostResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16185,7 +17525,7 @@ func UpdateReferenceHost(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateRulesetBody struct { Req *types.UpdateRuleset `xml:"urn:vim25 UpdateRuleset,omitempty"` - Res *types.UpdateRulesetResponse `xml:"urn:vim25 UpdateRulesetResponse,omitempty"` + Res *types.UpdateRulesetResponse `xml:"UpdateRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16205,7 +17545,7 @@ func UpdateRuleset(ctx context.Context, r soap.RoundTripper, req *types.UpdateRu type UpdateScsiLunDisplayNameBody struct { Req *types.UpdateScsiLunDisplayName `xml:"urn:vim25 UpdateScsiLunDisplayName,omitempty"` - Res *types.UpdateScsiLunDisplayNameResponse `xml:"urn:vim25 UpdateScsiLunDisplayNameResponse,omitempty"` + Res *types.UpdateScsiLunDisplayNameResponse `xml:"UpdateScsiLunDisplayNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16225,7 +17565,7 @@ func UpdateScsiLunDisplayName(ctx context.Context, r soap.RoundTripper, req *typ type UpdateSelfSignedClientCertBody struct { Req *types.UpdateSelfSignedClientCert `xml:"urn:vim25 UpdateSelfSignedClientCert,omitempty"` - Res *types.UpdateSelfSignedClientCertResponse `xml:"urn:vim25 UpdateSelfSignedClientCertResponse,omitempty"` + Res *types.UpdateSelfSignedClientCertResponse `xml:"UpdateSelfSignedClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16245,7 +17585,7 @@ func UpdateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *t type UpdateServiceConsoleVirtualNicBody struct { Req *types.UpdateServiceConsoleVirtualNic `xml:"urn:vim25 UpdateServiceConsoleVirtualNic,omitempty"` - Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"urn:vim25 UpdateServiceConsoleVirtualNicResponse,omitempty"` + Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"UpdateServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16265,7 +17605,7 @@ func UpdateServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, re type UpdateServiceMessageBody struct { Req *types.UpdateServiceMessage `xml:"urn:vim25 UpdateServiceMessage,omitempty"` - Res 
*types.UpdateServiceMessageResponse `xml:"urn:vim25 UpdateServiceMessageResponse,omitempty"` + Res *types.UpdateServiceMessageResponse `xml:"UpdateServiceMessageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16285,7 +17625,7 @@ func UpdateServiceMessage(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateServicePolicyBody struct { Req *types.UpdateServicePolicy `xml:"urn:vim25 UpdateServicePolicy,omitempty"` - Res *types.UpdateServicePolicyResponse `xml:"urn:vim25 UpdateServicePolicyResponse,omitempty"` + Res *types.UpdateServicePolicyResponse `xml:"UpdateServicePolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16305,7 +17645,7 @@ func UpdateServicePolicy(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateSoftwareInternetScsiEnabledBody struct { Req *types.UpdateSoftwareInternetScsiEnabled `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabled,omitempty"` - Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabledResponse,omitempty"` + Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"UpdateSoftwareInternetScsiEnabledResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16325,7 +17665,7 @@ func UpdateSoftwareInternetScsiEnabled(ctx context.Context, r soap.RoundTripper, type UpdateSystemResourcesBody struct { Req *types.UpdateSystemResources `xml:"urn:vim25 UpdateSystemResources,omitempty"` - Res *types.UpdateSystemResourcesResponse `xml:"urn:vim25 UpdateSystemResourcesResponse,omitempty"` + Res *types.UpdateSystemResourcesResponse `xml:"UpdateSystemResourcesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16345,7 +17685,7 @@ func UpdateSystemResources(ctx context.Context, r soap.RoundTripper, req *types. 
type UpdateSystemSwapConfigurationBody struct { Req *types.UpdateSystemSwapConfiguration `xml:"urn:vim25 UpdateSystemSwapConfiguration,omitempty"` - Res *types.UpdateSystemSwapConfigurationResponse `xml:"urn:vim25 UpdateSystemSwapConfigurationResponse,omitempty"` + Res *types.UpdateSystemSwapConfigurationResponse `xml:"UpdateSystemSwapConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16365,7 +17705,7 @@ func UpdateSystemSwapConfiguration(ctx context.Context, r soap.RoundTripper, req type UpdateSystemUsersBody struct { Req *types.UpdateSystemUsers `xml:"urn:vim25 UpdateSystemUsers,omitempty"` - Res *types.UpdateSystemUsersResponse `xml:"urn:vim25 UpdateSystemUsersResponse,omitempty"` + Res *types.UpdateSystemUsersResponse `xml:"UpdateSystemUsersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16385,7 +17725,7 @@ func UpdateSystemUsers(ctx context.Context, r soap.RoundTripper, req *types.Upda type UpdateUserBody struct { Req *types.UpdateUser `xml:"urn:vim25 UpdateUser,omitempty"` - Res *types.UpdateUserResponse `xml:"urn:vim25 UpdateUserResponse,omitempty"` + Res *types.UpdateUserResponse `xml:"UpdateUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16405,7 +17745,7 @@ func UpdateUser(ctx context.Context, r soap.RoundTripper, req *types.UpdateUser) type UpdateVAppConfigBody struct { Req *types.UpdateVAppConfig `xml:"urn:vim25 UpdateVAppConfig,omitempty"` - Res *types.UpdateVAppConfigResponse `xml:"urn:vim25 UpdateVAppConfigResponse,omitempty"` + Res *types.UpdateVAppConfigResponse `xml:"UpdateVAppConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16425,7 +17765,7 @@ func UpdateVAppConfig(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateVStorageInfrastructureObjectPolicy_TaskBody struct { Req *types.UpdateVStorageInfrastructureObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_Task,omitempty"` - Res *types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_TaskResponse,omitempty"` + Res *types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse `xml:"UpdateVStorageInfrastructureObjectPolicy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16443,9 +17783,29 @@ func UpdateVStorageInfrastructureObjectPolicy_Task(ctx context.Context, r soap.R return resBody.Res, nil } +type UpdateVStorageObjectCrypto_TaskBody struct { + Req *types.UpdateVStorageObjectCrypto_Task `xml:"urn:vim25 UpdateVStorageObjectCrypto_Task,omitempty"` + Res *types.UpdateVStorageObjectCrypto_TaskResponse `xml:"UpdateVStorageObjectCrypto_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVStorageObjectCrypto_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVStorageObjectCrypto_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVStorageObjectCrypto_Task) (*types.UpdateVStorageObjectCrypto_TaskResponse, error) { + var reqBody, resBody UpdateVStorageObjectCrypto_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateVStorageObjectPolicy_TaskBody struct { Req 
*types.UpdateVStorageObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageObjectPolicy_Task,omitempty"` - Res *types.UpdateVStorageObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageObjectPolicy_TaskResponse,omitempty"` + Res *types.UpdateVStorageObjectPolicy_TaskResponse `xml:"UpdateVStorageObjectPolicy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16465,7 +17825,7 @@ func UpdateVStorageObjectPolicy_Task(ctx context.Context, r soap.RoundTripper, r type UpdateVVolVirtualMachineFiles_TaskBody struct { Req *types.UpdateVVolVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_Task,omitempty"` - Res *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_TaskResponse,omitempty"` + Res *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:"UpdateVVolVirtualMachineFiles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16485,7 +17845,7 @@ func UpdateVVolVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper type UpdateVirtualMachineFiles_TaskBody struct { Req *types.UpdateVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVirtualMachineFiles_Task,omitempty"` - Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVirtualMachineFiles_TaskResponse,omitempty"` + Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"UpdateVirtualMachineFiles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16505,7 +17865,7 @@ func UpdateVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, re type UpdateVirtualNicBody struct { Req *types.UpdateVirtualNic `xml:"urn:vim25 UpdateVirtualNic,omitempty"` - Res *types.UpdateVirtualNicResponse `xml:"urn:vim25 UpdateVirtualNicResponse,omitempty"` + Res *types.UpdateVirtualNicResponse `xml:"UpdateVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16525,7 +17885,7 @@ func UpdateVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateVirtualSwitchBody struct { Req *types.UpdateVirtualSwitch `xml:"urn:vim25 UpdateVirtualSwitch,omitempty"` - Res *types.UpdateVirtualSwitchResponse `xml:"urn:vim25 UpdateVirtualSwitchResponse,omitempty"` + Res *types.UpdateVirtualSwitchResponse `xml:"UpdateVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16545,7 +17905,7 @@ func UpdateVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateVmfsUnmapBandwidthBody struct { Req *types.UpdateVmfsUnmapBandwidth `xml:"urn:vim25 UpdateVmfsUnmapBandwidth,omitempty"` - Res *types.UpdateVmfsUnmapBandwidthResponse `xml:"urn:vim25 UpdateVmfsUnmapBandwidthResponse,omitempty"` + Res *types.UpdateVmfsUnmapBandwidthResponse `xml:"UpdateVmfsUnmapBandwidthResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16565,7 +17925,7 @@ func UpdateVmfsUnmapBandwidth(ctx context.Context, r soap.RoundTripper, req *typ type UpdateVmfsUnmapPriorityBody struct { Req *types.UpdateVmfsUnmapPriority `xml:"urn:vim25 UpdateVmfsUnmapPriority,omitempty"` - Res *types.UpdateVmfsUnmapPriorityResponse `xml:"urn:vim25 UpdateVmfsUnmapPriorityResponse,omitempty"` + Res *types.UpdateVmfsUnmapPriorityResponse `xml:"UpdateVmfsUnmapPriorityResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16585,7 +17945,7 @@ func UpdateVmfsUnmapPriority(ctx context.Context, r soap.RoundTripper, req *type type UpdateVsan_TaskBody struct { Req *types.UpdateVsan_Task `xml:"urn:vim25 UpdateVsan_Task,omitempty"` - Res *types.UpdateVsan_TaskResponse `xml:"urn:vim25 UpdateVsan_TaskResponse,omitempty"` + Res *types.UpdateVsan_TaskResponse `xml:"UpdateVsan_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16605,7 +17965,7 @@ func UpdateVsan_Task(ctx context.Context, r soap.RoundTripper, req *types.Update type UpgradeIoFilter_TaskBody struct { Req *types.UpgradeIoFilter_Task `xml:"urn:vim25 UpgradeIoFilter_Task,omitempty"` - Res *types.UpgradeIoFilter_TaskResponse `xml:"urn:vim25 UpgradeIoFilter_TaskResponse,omitempty"` + Res *types.UpgradeIoFilter_TaskResponse `xml:"UpgradeIoFilter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16625,7 +17985,7 @@ func UpgradeIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.U type UpgradeTools_TaskBody struct { Req *types.UpgradeTools_Task `xml:"urn:vim25 UpgradeTools_Task,omitempty"` - Res *types.UpgradeTools_TaskResponse `xml:"urn:vim25 UpgradeTools_TaskResponse,omitempty"` + Res *types.UpgradeTools_TaskResponse `xml:"UpgradeTools_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16645,7 +18005,7 @@ func UpgradeTools_Task(ctx context.Context, r soap.RoundTripper, req *types.Upgr type UpgradeVM_TaskBody struct { Req *types.UpgradeVM_Task `xml:"urn:vim25 UpgradeVM_Task,omitempty"` - Res *types.UpgradeVM_TaskResponse `xml:"urn:vim25 UpgradeVM_TaskResponse,omitempty"` + Res *types.UpgradeVM_TaskResponse `xml:"UpgradeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16665,7 +18025,7 @@ func UpgradeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Upgrade type UpgradeVmLayoutBody struct { Req *types.UpgradeVmLayout `xml:"urn:vim25 UpgradeVmLayout,omitempty"` - Res *types.UpgradeVmLayoutResponse `xml:"urn:vim25 UpgradeVmLayoutResponse,omitempty"` + Res *types.UpgradeVmLayoutResponse `xml:"UpgradeVmLayoutResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16685,7 +18045,7 @@ func UpgradeVmLayout(ctx context.Context, r soap.RoundTripper, req *types.Upgrad type UpgradeVmfsBody struct { Req *types.UpgradeVmfs `xml:"urn:vim25 UpgradeVmfs,omitempty"` - Res *types.UpgradeVmfsResponse `xml:"urn:vim25 UpgradeVmfsResponse,omitempty"` + Res *types.UpgradeVmfsResponse `xml:"UpgradeVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16705,7 +18065,7 @@ func UpgradeVmfs(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmf type UpgradeVsanObjectsBody struct { Req *types.UpgradeVsanObjects `xml:"urn:vim25 UpgradeVsanObjects,omitempty"` - Res *types.UpgradeVsanObjectsResponse `xml:"urn:vim25 UpgradeVsanObjectsResponse,omitempty"` + Res *types.UpgradeVsanObjectsResponse `xml:"UpgradeVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16725,7 +18085,7 @@ func UpgradeVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Upg type UploadClientCertBody struct { Req *types.UploadClientCert `xml:"urn:vim25 
UploadClientCert,omitempty"` - Res *types.UploadClientCertResponse `xml:"urn:vim25 UploadClientCertResponse,omitempty"` + Res *types.UploadClientCertResponse `xml:"UploadClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16745,7 +18105,7 @@ func UploadClientCert(ctx context.Context, r soap.RoundTripper, req *types.Uploa type UploadKmipServerCertBody struct { Req *types.UploadKmipServerCert `xml:"urn:vim25 UploadKmipServerCert,omitempty"` - Res *types.UploadKmipServerCertResponse `xml:"urn:vim25 UploadKmipServerCertResponse,omitempty"` + Res *types.UploadKmipServerCertResponse `xml:"UploadKmipServerCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16763,9 +18123,29 @@ func UploadKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.U return resBody.Res, nil } +type VCenterUpdateVStorageObjectMetadataEx_TaskBody struct { + Req *types.VCenterUpdateVStorageObjectMetadataEx_Task `xml:"urn:vim25 VCenterUpdateVStorageObjectMetadataEx_Task,omitempty"` + Res *types.VCenterUpdateVStorageObjectMetadataEx_TaskResponse `xml:"VCenterUpdateVStorageObjectMetadataEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *VCenterUpdateVStorageObjectMetadataEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func VCenterUpdateVStorageObjectMetadataEx_Task(ctx context.Context, r soap.RoundTripper, req *types.VCenterUpdateVStorageObjectMetadataEx_Task) (*types.VCenterUpdateVStorageObjectMetadataEx_TaskResponse, error) { + var reqBody, resBody VCenterUpdateVStorageObjectMetadataEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type VStorageObjectCreateSnapshot_TaskBody struct { Req *types.VStorageObjectCreateSnapshot_Task `xml:"urn:vim25 VStorageObjectCreateSnapshot_Task,omitempty"` - Res *types.VStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 VStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Res *types.VStorageObjectCreateSnapshot_TaskResponse `xml:"VStorageObjectCreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16785,7 +18165,7 @@ func VStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, type ValidateCredentialsInGuestBody struct { Req *types.ValidateCredentialsInGuest `xml:"urn:vim25 ValidateCredentialsInGuest,omitempty"` - Res *types.ValidateCredentialsInGuestResponse `xml:"urn:vim25 ValidateCredentialsInGuestResponse,omitempty"` + Res *types.ValidateCredentialsInGuestResponse `xml:"ValidateCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16803,9 +18183,29 @@ func ValidateCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type ValidateHCIConfigurationBody struct { + Req *types.ValidateHCIConfiguration `xml:"urn:vim25 ValidateHCIConfiguration,omitempty"` + Res *types.ValidateHCIConfigurationResponse `xml:"ValidateHCIConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateHCIConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateHCIConfiguration(ctx context.Context, r soap.RoundTripper, req *types.ValidateHCIConfiguration) 
(*types.ValidateHCIConfigurationResponse, error) { + var reqBody, resBody ValidateHCIConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ValidateHostBody struct { Req *types.ValidateHost `xml:"urn:vim25 ValidateHost,omitempty"` - Res *types.ValidateHostResponse `xml:"urn:vim25 ValidateHostResponse,omitempty"` + Res *types.ValidateHostResponse `xml:"ValidateHostResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16825,7 +18225,7 @@ func ValidateHost(ctx context.Context, r soap.RoundTripper, req *types.ValidateH type ValidateHostProfileComposition_TaskBody struct { Req *types.ValidateHostProfileComposition_Task `xml:"urn:vim25 ValidateHostProfileComposition_Task,omitempty"` - Res *types.ValidateHostProfileComposition_TaskResponse `xml:"urn:vim25 ValidateHostProfileComposition_TaskResponse,omitempty"` + Res *types.ValidateHostProfileComposition_TaskResponse `xml:"ValidateHostProfileComposition_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16845,7 +18245,7 @@ func ValidateHostProfileComposition_Task(ctx context.Context, r soap.RoundTrippe type ValidateMigrationBody struct { Req *types.ValidateMigration `xml:"urn:vim25 ValidateMigration,omitempty"` - Res *types.ValidateMigrationResponse `xml:"urn:vim25 ValidateMigrationResponse,omitempty"` + Res *types.ValidateMigrationResponse `xml:"ValidateMigrationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16865,7 +18265,7 @@ func ValidateMigration(ctx context.Context, r soap.RoundTripper, req *types.Vali type ValidateStoragePodConfigBody struct { Req *types.ValidateStoragePodConfig `xml:"urn:vim25 ValidateStoragePodConfig,omitempty"` - Res *types.ValidateStoragePodConfigResponse `xml:"urn:vim25 ValidateStoragePodConfigResponse,omitempty"` + Res *types.ValidateStoragePodConfigResponse `xml:"ValidateStoragePodConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16883,9 +18283,29 @@ func ValidateStoragePodConfig(ctx context.Context, r soap.RoundTripper, req *typ return resBody.Res, nil } +type VstorageObjectVCenterQueryChangedDiskAreasBody struct { + Req *types.VstorageObjectVCenterQueryChangedDiskAreas `xml:"urn:vim25 VstorageObjectVCenterQueryChangedDiskAreas,omitempty"` + Res *types.VstorageObjectVCenterQueryChangedDiskAreasResponse `xml:"VstorageObjectVCenterQueryChangedDiskAreasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *VstorageObjectVCenterQueryChangedDiskAreasBody) Fault() *soap.Fault { return b.Fault_ } + +func VstorageObjectVCenterQueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.VstorageObjectVCenterQueryChangedDiskAreas) (*types.VstorageObjectVCenterQueryChangedDiskAreasResponse, error) { + var reqBody, resBody VstorageObjectVCenterQueryChangedDiskAreasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type WaitForUpdatesBody struct { Req *types.WaitForUpdates `xml:"urn:vim25 WaitForUpdates,omitempty"` - Res *types.WaitForUpdatesResponse `xml:"urn:vim25 WaitForUpdatesResponse,omitempty"` + Res *types.WaitForUpdatesResponse `xml:"WaitForUpdatesResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16905,7 +18325,7 @@ func WaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.WaitFor type WaitForUpdatesExBody struct { Req *types.WaitForUpdatesEx `xml:"urn:vim25 WaitForUpdatesEx,omitempty"` - Res *types.WaitForUpdatesExResponse `xml:"urn:vim25 WaitForUpdatesExResponse,omitempty"` + Res *types.WaitForUpdatesExResponse `xml:"WaitForUpdatesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16925,7 +18345,7 @@ func WaitForUpdatesEx(ctx context.Context, r soap.RoundTripper, req *types.WaitF type XmlToCustomizationSpecItemBody struct { Req *types.XmlToCustomizationSpecItem `xml:"urn:vim25 XmlToCustomizationSpecItem,omitempty"` - Res *types.XmlToCustomizationSpecItemResponse `xml:"urn:vim25 XmlToCustomizationSpecItemResponse,omitempty"` + Res *types.XmlToCustomizationSpecItemResponse `xml:"XmlToCustomizationSpecItemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16945,7 +18365,7 @@ func XmlToCustomizationSpecItem(ctx context.Context, r soap.RoundTripper, req *t type ZeroFillVirtualDisk_TaskBody struct { Req *types.ZeroFillVirtualDisk_Task `xml:"urn:vim25 ZeroFillVirtualDisk_Task,omitempty"` - Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"urn:vim25 ZeroFillVirtualDisk_TaskResponse,omitempty"` + Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"ZeroFillVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16965,7 +18385,7 @@ func ZeroFillVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *typ type ConfigureVcha_TaskBody struct { Req *types.ConfigureVcha_Task `xml:"urn:vim25 configureVcha_Task,omitempty"` - Res *types.ConfigureVcha_TaskResponse `xml:"urn:vim25 configureVcha_TaskResponse,omitempty"` + Res *types.ConfigureVcha_TaskResponse `xml:"configureVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16985,7 +18405,7 @@ func ConfigureVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Con type CreatePassiveNode_TaskBody struct { Req *types.CreatePassiveNode_Task `xml:"urn:vim25 createPassiveNode_Task,omitempty"` - Res *types.CreatePassiveNode_TaskResponse `xml:"urn:vim25 createPassiveNode_TaskResponse,omitempty"` + Res *types.CreatePassiveNode_TaskResponse `xml:"createPassiveNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17005,7 +18425,7 @@ func CreatePassiveNode_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateWitnessNode_TaskBody struct { Req *types.CreateWitnessNode_Task `xml:"urn:vim25 createWitnessNode_Task,omitempty"` - Res *types.CreateWitnessNode_TaskResponse `xml:"urn:vim25 createWitnessNode_TaskResponse,omitempty"` + Res *types.CreateWitnessNode_TaskResponse `xml:"createWitnessNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17025,7 +18445,7 @@ func CreateWitnessNode_Task(ctx context.Context, r soap.RoundTripper, req *types type DeployVcha_TaskBody struct { Req *types.DeployVcha_Task `xml:"urn:vim25 deployVcha_Task,omitempty"` - Res *types.DeployVcha_TaskResponse `xml:"urn:vim25 deployVcha_TaskResponse,omitempty"` + Res *types.DeployVcha_TaskResponse `xml:"deployVcha_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17045,7 +18465,7 @@ func DeployVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Deploy type DestroyVcha_TaskBody struct { Req *types.DestroyVcha_Task `xml:"urn:vim25 destroyVcha_Task,omitempty"` - Res *types.DestroyVcha_TaskResponse `xml:"urn:vim25 destroyVcha_TaskResponse,omitempty"` + Res *types.DestroyVcha_TaskResponse `xml:"destroyVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17065,7 +18485,7 @@ func DestroyVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Destr type FetchSoftwarePackagesBody struct { Req *types.FetchSoftwarePackages `xml:"urn:vim25 fetchSoftwarePackages,omitempty"` - Res *types.FetchSoftwarePackagesResponse `xml:"urn:vim25 fetchSoftwarePackagesResponse,omitempty"` + Res *types.FetchSoftwarePackagesResponse `xml:"fetchSoftwarePackagesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17085,7 +18505,7 @@ func FetchSoftwarePackages(ctx context.Context, r soap.RoundTripper, req *types. type GetClusterModeBody struct { Req *types.GetClusterMode `xml:"urn:vim25 getClusterMode,omitempty"` - Res *types.GetClusterModeResponse `xml:"urn:vim25 getClusterModeResponse,omitempty"` + Res *types.GetClusterModeResponse `xml:"getClusterModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17105,7 +18525,7 @@ func GetClusterMode(ctx context.Context, r soap.RoundTripper, req *types.GetClus type GetVchaConfigBody struct { Req *types.GetVchaConfig `xml:"urn:vim25 getVchaConfig,omitempty"` - Res *types.GetVchaConfigResponse `xml:"urn:vim25 getVchaConfigResponse,omitempty"` + Res *types.GetVchaConfigResponse `xml:"getVchaConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17125,7 +18545,7 @@ func GetVchaConfig(ctx context.Context, r soap.RoundTripper, req *types.GetVchaC type InitiateFailover_TaskBody struct { Req *types.InitiateFailover_Task `xml:"urn:vim25 initiateFailover_Task,omitempty"` - Res *types.InitiateFailover_TaskResponse `xml:"urn:vim25 initiateFailover_TaskResponse,omitempty"` + Res *types.InitiateFailover_TaskResponse `xml:"initiateFailover_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17145,7 +18565,7 @@ func InitiateFailover_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type InstallDateBody struct { Req *types.InstallDate `xml:"urn:vim25 installDate,omitempty"` - Res *types.InstallDateResponse `xml:"urn:vim25 installDateResponse,omitempty"` + Res *types.InstallDateResponse `xml:"installDateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17165,7 +18585,7 @@ func InstallDate(ctx context.Context, r soap.RoundTripper, req *types.InstallDat type PrepareVcha_TaskBody struct { Req *types.PrepareVcha_Task `xml:"urn:vim25 prepareVcha_Task,omitempty"` - Res *types.PrepareVcha_TaskResponse `xml:"urn:vim25 prepareVcha_TaskResponse,omitempty"` + Res *types.PrepareVcha_TaskResponse `xml:"prepareVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17185,7 +18605,7 @@ func PrepareVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Prepa type QueryDatacenterConfigOptionDescriptorBody struct { Req *types.QueryDatacenterConfigOptionDescriptor `xml:"urn:vim25 queryDatacenterConfigOptionDescriptor,omitempty"` - Res *types.QueryDatacenterConfigOptionDescriptorResponse `xml:"urn:vim25 queryDatacenterConfigOptionDescriptorResponse,omitempty"` + Res *types.QueryDatacenterConfigOptionDescriptorResponse `xml:"queryDatacenterConfigOptionDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17205,7 +18625,7 @@ func QueryDatacenterConfigOptionDescriptor(ctx context.Context, r soap.RoundTrip type ReloadVirtualMachineFromPath_TaskBody struct { Req *types.ReloadVirtualMachineFromPath_Task `xml:"urn:vim25 reloadVirtualMachineFromPath_Task,omitempty"` - Res *types.ReloadVirtualMachineFromPath_TaskResponse `xml:"urn:vim25 reloadVirtualMachineFromPath_TaskResponse,omitempty"` + Res *types.ReloadVirtualMachineFromPath_TaskResponse `xml:"reloadVirtualMachineFromPath_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17225,7 +18645,7 @@ func ReloadVirtualMachineFromPath_Task(ctx context.Context, r soap.RoundTripper, type SetClusterMode_TaskBody struct { Req *types.SetClusterMode_Task `xml:"urn:vim25 setClusterMode_Task,omitempty"` - Res *types.SetClusterMode_TaskResponse `xml:"urn:vim25 setClusterMode_TaskResponse,omitempty"` + Res *types.SetClusterMode_TaskResponse `xml:"setClusterMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17245,7 +18665,7 @@ func SetClusterMode_Task(ctx context.Context, r soap.RoundTripper, req *types.Se type SetCustomValueBody struct { Req *types.SetCustomValue `xml:"urn:vim25 setCustomValue,omitempty"` - Res *types.SetCustomValueResponse `xml:"urn:vim25 setCustomValueResponse,omitempty"` + Res *types.SetCustomValueResponse `xml:"setCustomValueResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17265,7 +18685,7 @@ func SetCustomValue(ctx context.Context, r soap.RoundTripper, req *types.SetCust type UnregisterVApp_TaskBody struct { Req *types.UnregisterVApp_Task `xml:"urn:vim25 unregisterVApp_Task,omitempty"` - Res *types.UnregisterVApp_TaskResponse `xml:"urn:vim25 unregisterVApp_TaskResponse,omitempty"` + Res *types.UnregisterVApp_TaskResponse `xml:"unregisterVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } diff --git 
a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go new file mode 100644 index 000000000000..a0ffff8c3270 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go @@ -0,0 +1,44 @@ +/* + Copyright (c) 2022 VMware, Inc. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package methods + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type PlaceVmsXClusterBody struct { + Req *types.PlaceVmsXCluster `xml:"urn:vim25 PlaceVmsXCluster,omitempty"` + Res *types.PlaceVmsXClusterResponse `xml:"PlaceVmsXClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PlaceVmsXClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func PlaceVmsXCluster(ctx context.Context, r soap.RoundTripper, req *types.PlaceVmsXCluster) (*types.PlaceVmsXClusterResponse, error) { + var reqBody, resBody PlaceVmsXClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/mo.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/mo.go index 4f19988e36df..f3dcb5e29926 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/mo.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/mo.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -79,12 +79,13 @@ func init() { type ClusterComputeResource struct { ComputeResource - Configuration types.ClusterConfigInfo `mo:"configuration"` - Recommendation []types.ClusterRecommendation `mo:"recommendation"` - DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` - MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` - ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` - DrsFault []types.ClusterDrsFaults `mo:"drsFault"` + Configuration types.ClusterConfigInfo `mo:"configuration"` + Recommendation []types.ClusterRecommendation `mo:"recommendation"` + DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` + HciConfig *types.ClusterComputeResourceHCIConfigInfo `mo:"hciConfig"` + MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` + ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` + DrsFault []types.ClusterDrsFaults `mo:"drsFault"` } func init() { @@ -128,6 +129,7 @@ type ComputeResource struct { Summary types.BaseComputeResourceSummary `mo:"summary"` EnvironmentBrowser *types.ManagedObjectReference `mo:"environmentBrowser"` ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"` + LifecycleManaged *bool `mo:"lifecycleManaged"` } func (m *ComputeResource) Entity() *ManagedEntity { @@ -441,6 +443,7 @@ type Folder struct { ChildType []string `mo:"childType"` ChildEntity []types.ManagedObjectReference `mo:"childEntity"` + Namespace *string `mo:"namespace"` } func (m *Folder) Entity() *ManagedEntity { @@ -577,6 +580,21 @@ func init() { t["HostActiveDirectoryAuthentication"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem() } +type HostAssignableHardwareManager struct { + Self types.ManagedObjectReference + + Binding []types.HostAssignableHardwareBinding `mo:"binding"` + Config types.HostAssignableHardwareConfig `mo:"config"` +} + +func (m HostAssignableHardwareManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAssignableHardwareManager"] = reflect.TypeOf((*HostAssignableHardwareManager)(nil)).Elem() +} + type HostAuthenticationManager struct { Self types.ManagedObjectReference @@ -1265,10 +1283,10 @@ func init() { type Network struct { ManagedEntity - Name string `mo:"name"` Summary types.BaseNetworkSummary `mo:"summary"` Host []types.ManagedObjectReference `mo:"host"` Vm []types.ManagedObjectReference `mo:"vm"` + Name string `mo:"name"` } func (m *Network) Entity() *ManagedEntity { @@ -1444,6 +1462,7 @@ type ResourcePool struct { ResourcePool []types.ManagedObjectReference `mo:"resourcePool"` Vm []types.ManagedObjectReference `mo:"vm"` Config types.ResourceConfigSpec `mo:"config"` + Namespace *string `mo:"namespace"` ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"` } @@ -1556,6 +1575,18 @@ func init() { t["SimpleCommand"] = reflect.TypeOf((*SimpleCommand)(nil)).Elem() } +type SiteInfoManager struct { + Self types.ManagedObjectReference +} + +func (m SiteInfoManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SiteInfoManager"] = reflect.TypeOf((*SiteInfoManager)(nil)).Elem() +} + type StoragePod struct { Folder @@ -1567,6 +1598,18 @@ func init() { t["StoragePod"] = reflect.TypeOf((*StoragePod)(nil)).Elem() } +type StorageQueryManager struct { + Self types.ManagedObjectReference +} + +func (m StorageQueryManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["StorageQueryManager"] = 
reflect.TypeOf((*StorageQueryManager)(nil)).Elem() +} + type StorageResourceManager struct { Self types.ManagedObjectReference } @@ -1615,6 +1658,18 @@ func init() { t["TaskManager"] = reflect.TypeOf((*TaskManager)(nil)).Elem() } +type TenantTenantManager struct { + Self types.ManagedObjectReference +} + +func (m TenantTenantManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["TenantTenantManager"] = reflect.TypeOf((*TenantTenantManager)(nil)).Elem() +} + type UserDirectory struct { Self types.ManagedObjectReference @@ -1744,6 +1799,18 @@ func init() { t["VirtualMachineCompatibilityChecker"] = reflect.TypeOf((*VirtualMachineCompatibilityChecker)(nil)).Elem() } +type VirtualMachineGuestCustomizationManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineGuestCustomizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineGuestCustomizationManager"] = reflect.TypeOf((*VirtualMachineGuestCustomizationManager)(nil)).Elem() +} + type VirtualMachineProvisioningChecker struct { Self types.ManagedObjectReference } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go index c470c0ac0a10..e877da063a38 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go @@ -46,7 +46,7 @@ func ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProp // it returns the first fault it finds there as error. If the 'MissingSet' // field is empty, it returns a pointer to a reflect.Value. It handles contain // nested properties, such as 'guest.ipAddress' or 'config.hardware'. -func ObjectContentToType(o types.ObjectContent) (interface{}, error) { +func ObjectContentToType(o types.ObjectContent, ptr ...bool) (interface{}, error) { // Expect no properties in the missing set for _, p := range o.MissingSet { if ignoreMissingProperty(o.Obj, p) { @@ -62,6 +62,9 @@ func ObjectContentToType(o types.ObjectContent) (interface{}, error) { return nil, err } + if len(ptr) == 1 && ptr[0] { + return v.Interface(), nil + } return v.Elem().Interface(), nil } @@ -81,9 +84,9 @@ func ApplyPropertyChange(obj Reference, changes []types.PropertyChange) { } } -// LoadRetrievePropertiesResponse converts the response of a call to -// RetrieveProperties to one or more managed objects. -func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst interface{}) error { +// LoadObjectContent converts the response of a call to +// RetrieveProperties{Ex} to one or more managed objects. 
+func LoadObjectContent(content []types.ObjectContent, dst interface{}) error { rt := reflect.TypeOf(dst) if rt == nil || rt.Kind() != reflect.Ptr { panic("need pointer") @@ -104,7 +107,7 @@ func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst i } if isSlice { - for _, p := range res.Returnval { + for _, p := range content { v, err := ObjectContentToType(p) if err != nil { return err @@ -123,10 +126,10 @@ func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst i rv.Set(reflect.Append(rv, reflect.ValueOf(v))) } } else { - switch len(res.Returnval) { + switch len(content) { case 0: case 1: - v, err := ObjectContentToType(res.Returnval[0]) + v, err := ObjectContentToType(content[0]) if err != nil { return err } @@ -160,7 +163,7 @@ func RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req return err } - return LoadRetrievePropertiesResponse(res, dst) + return LoadObjectContent(res.Returnval, dst) } // RetrieveProperties retrieves the properties of the managed object specified @@ -188,3 +191,65 @@ func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types. return RetrievePropertiesForRequest(ctx, r, req, dst) } + +var morType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem() + +// References returns all non-nil moref field values in the given struct. +// Only Anonymous struct fields are followed by default. The optional follow +// param will follow any struct fields when true. +func References(s interface{}, follow ...bool) []types.ManagedObjectReference { + var refs []types.ManagedObjectReference + rval := reflect.ValueOf(s) + rtype := rval.Type() + + if rval.Kind() == reflect.Ptr { + rval = rval.Elem() + rtype = rval.Type() + } + + for i := 0; i < rval.NumField(); i++ { + val := rval.Field(i) + finfo := rtype.Field(i) + + if finfo.Anonymous { + refs = append(refs, References(val.Interface(), follow...)...) + continue + } + if finfo.Name == "Self" { + continue + } + + ftype := val.Type() + + if ftype.Kind() == reflect.Slice { + if ftype.Elem() == morType { + s := val.Interface().([]types.ManagedObjectReference) + for i := range s { + refs = append(refs, s[i]) + } + } + continue + } + + if ftype.Kind() == reflect.Ptr { + if val.IsNil() { + continue + } + val = val.Elem() + ftype = val.Type() + } + + if ftype == morType { + refs = append(refs, val.Interface().(types.ManagedObjectReference)) + continue + } + + if len(follow) != 0 && follow[0] { + if ftype.Kind() == reflect.Struct && val.CanSet() { + refs = append(refs, References(val.Interface(), follow...)...) + } + } + } + + return refs +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go index 661f2422810a..aeb7d4a7980a 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go @@ -157,7 +157,7 @@ func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) { var nilValue reflect.Value -// assignValue assignes a value 'pv' to the struct pointed to by 'val', given a +// assignValue assigns a value 'pv' to the struct pointed to by 'val', given a // slice of field indices. It recurses into the struct until it finds the field // specified by the indices. It creates new values for pointer types where // needed. 
@@ -256,3 +256,8 @@ func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, return v, nil } + +func IsManagedObjectType(kind string) bool { + _, ok := t[kind] + return ok +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/progress/reader.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/progress/reader.go index cb184cfe99c0..201333ee4b59 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/progress/reader.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/progress/reader.go @@ -36,6 +36,9 @@ type readerReport struct { } func (r readerReport) Percentage() float32 { + if r.size <= 0 { + return 0 + } return 100.0 * float32(r.pos) / float32(r.size) } @@ -158,7 +161,7 @@ func bpsLoop(ch <-chan Report, dst *uint64) { // Setup timer for front of list to become stale. if e := l.Front(); e != nil { - dt := time.Second - time.Now().Sub(e.Value.(readerReport).t) + dt := time.Second - time.Since(e.Value.(readerReport).t) tch = time.After(dt) } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/retry.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/retry.go index b8807eef3192..bf663a101267 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/retry.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -18,8 +18,6 @@ package vim25 import ( "context" - "net" - "net/url" "time" "github.com/vmware/govmomi/vim25/soap" @@ -27,37 +25,40 @@ import ( type RetryFunc func(err error) (retry bool, delay time.Duration) -// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n -// times, only if the error returned by the RoundTrip function is a temporary -// network error (for example: a connect timeout). +// TemporaryNetworkError is deprecated. Use Retry() with RetryTemporaryNetworkError and retryAttempts instead. func TemporaryNetworkError(n int) RetryFunc { - return func(err error) (retry bool, delay time.Duration) { - var nerr net.Error - var ok bool - - // Never retry if this is not a network error. - switch rerr := err.(type) { - case *url.Error: - if nerr, ok = rerr.Err.(net.Error); !ok { + return func(err error) (bool, time.Duration) { + if IsTemporaryNetworkError(err) { + // Don't retry if we're out of tries. + if n--; n <= 0 { return false, 0 } - case net.Error: - nerr = rerr - default: - return false, 0 - } - - if !nerr.Temporary() { - return false, 0 + return true, 0 } + return false, 0 + } +} - // Don't retry if we're out of tries. - if n--; n <= 0 { - return false, 0 - } +// RetryTemporaryNetworkError returns a RetryFunc that returns IsTemporaryNetworkError(err) +func RetryTemporaryNetworkError(err error) (bool, time.Duration) { + return IsTemporaryNetworkError(err), 0 +} - return true, 0 +// IsTemporaryNetworkError returns false unless the error implements +// a Temporary() bool method such as url.Error and net.Error. +// Otherwise, returns the value of the Temporary() method. +func IsTemporaryNetworkError(err error) bool { + t, ok := err.(interface { + // Temporary is implemented by url.Error and net.Error + Temporary() bool + }) + + if !ok { + // Not a Temporary error. + return false } + + return t.Temporary() } type retry struct { @@ -66,7 +67,8 @@ type retry struct { // fn is a custom function that is called when an error occurs. // It returns whether or not to retry, and if so, how long to // delay before retrying. 
- fn RetryFunc + fn RetryFunc + maxRetryAttempts int } // Retry wraps the specified soap.RoundTripper and invokes the @@ -74,10 +76,16 @@ type retry struct { // retry the call, and if so, how long to wait before retrying. If // the result of this function is to not retry, the original error // is returned from the RoundTrip function. -func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { +// The soap.RoundTripper will return the original error if retryAttempts is specified and reached. +func Retry(roundTripper soap.RoundTripper, fn RetryFunc, retryAttempts ...int) soap.RoundTripper { r := &retry{ - roundTripper: roundTripper, - fn: fn, + roundTripper: roundTripper, + fn: fn, + maxRetryAttempts: 1, + } + + if len(retryAttempts) == 1 { + r.maxRetryAttempts = retryAttempts[0] } return r @@ -86,7 +94,7 @@ func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { var err error - for { + for attempt := 0; attempt < r.maxRetryAttempts; attempt++ { err = r.roundTripper.RoundTrip(ctx, req, res) if err == nil { break diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go index 90b9fbe16f68..e46a17f95484 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -35,11 +35,14 @@ import ( "net/url" "os" "path/filepath" + "reflect" "regexp" + "runtime" "strings" "sync" "time" + "github.com/vmware/govmomi/internal/version" "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vim25/xml" @@ -57,6 +60,15 @@ const ( SessionCookieName = "vmware_soap_session" ) +// defaultUserAgent is the default user agent string, e.g. +// "govmomi/0.28.0 (go1.18.3;linux;amd64)" +var defaultUserAgent = fmt.Sprintf( + "%s/%s (%s)", + version.ClientName, + version.ClientVersion, + strings.Join([]string{runtime.Version(), runtime.GOOS, runtime.GOARCH}, ";"), +) + type Client struct { http.Client @@ -70,9 +82,11 @@ type Client struct { Namespace string // Vim namespace Version string // Vim version + Types types.Func UserAgent string - cookie string + cookie string + insecureCookies bool } var schemeMatch = regexp.MustCompile(`^\w+://`) @@ -122,6 +136,8 @@ func NewClient(u *url.URL, insecure bool) *Client { u: u, k: insecure, d: newDebug(), + + Types: types.TypeFunc(), } // Initialize http.RoundTripper on client, so we can customize it below @@ -140,10 +156,14 @@ func NewClient(u *url.URL, insecure bool) *Client { c.hosts = make(map[string]string) c.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: c.k} - // Don't bother setting DialTLS if InsecureSkipVerify=true - if !c.k { - c.t.DialTLS = c.dialTLS - } + + // Always set DialTLS and DialTLSContext, even if InsecureSkipVerify=true, + // because of how certificate verification has been delegated to the host's + // PKI framework in Go 1.18. 
Please see the following links for more info: + // + // * https://tip.golang.org/doc/go1.18 (search for "Certificate.Verify") + // * https://github.com/square/certigo/issues/264 + c.t.DialTLSContext = c.dialTLSContext c.Client.Transport = c.t c.Client.Jar, _ = cookiejar.New(nil) @@ -152,9 +172,17 @@ func NewClient(u *url.URL, insecure bool) *Client { c.u = c.URL() c.u.User = nil + if c.u.Scheme == "http" { + c.insecureCookies = os.Getenv("GOVMOMI_INSECURE_COOKIES") == "true" + } + return &c } +func (c *Client) DefaultTransport() *http.Transport { + return c.t +} + // NewServiceClient creates a NewClient with the given URL.Path and namespace. func (c *Client) NewServiceClient(path string, namespace string) *Client { vc := c.URL() @@ -169,7 +197,7 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { client := NewClient(u, c.k) client.Namespace = "urn:" + namespace - client.Transport.(*http.Transport).TLSClientConfig = c.Transport.(*http.Transport).TLSClientConfig + client.DefaultTransport().TLSClientConfig = c.DefaultTransport().TLSClientConfig if cert := c.Certificate(); cert != nil { client.SetCertificate(*cert) } @@ -195,19 +223,34 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { // Copy any query params (e.g. GOVMOMI_TUNNEL_PROXY_PORT used in testing) client.u.RawQuery = vc.RawQuery + client.UserAgent = c.UserAgent + + vimTypes := c.Types + client.Types = func(name string) (reflect.Type, bool) { + kind, ok := vimTypes(name) + if ok { + return kind, ok + } + // vim25/xml typeToString() does not have an option to include namespace prefix. + // Workaround this by re-trying the lookup with the namespace prefix. + return vimTypes(namespace + ":" + name) + } + return client } -// SetRootCAs defines the set of root certificate authorities -// that clients use when verifying server certificates. -// By default TLS uses the host's root CA set. +// SetRootCAs defines the set of PEM-encoded file locations of root certificate +// authorities the client uses when verifying server certificates instead of the +// TLS defaults which uses the host's root CA set. Multiple PEM file locations +// can be specified using the OS-specific PathListSeparator. 
// -// See: http.Client.Transport.TLSClientConfig.RootCAs -func (c *Client) SetRootCAs(file string) error { +// See: http.Client.Transport.TLSClientConfig.RootCAs and +// https://pkg.go.dev/os#PathListSeparator +func (c *Client) SetRootCAs(pemPaths string) error { pool := x509.NewCertPool() - for _, name := range filepath.SplitList(file) { - pem, err := ioutil.ReadFile(name) + for _, name := range filepath.SplitList(pemPaths) { + pem, err := ioutil.ReadFile(filepath.Clean(name)) if err != nil { return err } @@ -277,7 +320,7 @@ func (c *Client) LoadThumbprints(file string) error { } func (c *Client) loadThumbprints(name string) error { - f, err := os.Open(name) + f, err := os.Open(filepath.Clean(name)) if err != nil { if os.IsNotExist(err) { return nil @@ -313,7 +356,10 @@ func ThumbprintSHA1(cert *x509.Certificate) string { return strings.Join(hex, ":") } -func (c *Client) dialTLS(network string, addr string) (net.Conn, error) { +func (c *Client) dialTLSContext( + ctx context.Context, + network, addr string) (net.Conn, error) { + // Would be nice if there was a tls.Config.Verify func, // see tls.clientHandshakeState.doFullHandshake @@ -327,7 +373,20 @@ func (c *Client) dialTLS(network string, addr string) (net.Conn, error) { case x509.UnknownAuthorityError: case x509.HostnameError: default: - return nil, err + // Allow a thumbprint verification attempt if the error indicates + // the failure was due to lack of trust. + // + // Please note the err variable is not a special type of x509 or HTTP + // error that can be validated by a type assertion. The err variable is + // in fact an *errors.errorString. + switch { + case strings.HasSuffix(err.Error(), "certificate is not trusted"): + // darwin and linux + case strings.HasSuffix(err.Error(), "certificate signed by unknown authority"): + // windows + default: + return nil, err + } } thumbprint := c.Thumbprint(addr) @@ -346,12 +405,16 @@ func (c *Client) dialTLS(network string, addr string) (net.Conn, error) { if thumbprint != peer { _ = conn.Close() - return nil, fmt.Errorf("Host %q thumbprint does not match %q", addr, thumbprint) + return nil, fmt.Errorf("host %q thumbprint does not match %q", addr, thumbprint) } return conn, nil } +func (c *Client) dialTLS(network, addr string) (net.Conn, error) { + return c.dialTLSContext(context.Background(), network, addr) +} + // splitHostPort is similar to net.SplitHostPort, // but rather than return error if there isn't a ':port', // return an empty string for the port. 
@@ -424,6 +487,7 @@ type marshaledClient struct { Cookies []*http.Cookie URL *url.URL Insecure bool + Version string } func (c *Client) MarshalJSON() ([]byte, error) { @@ -431,6 +495,7 @@ func (c *Client) MarshalJSON() ([]byte, error) { Cookies: c.Jar.Cookies(c.u), URL: c.u, Insecure: c.k, + Version: c.Version, } return json.Marshal(m) @@ -445,6 +510,7 @@ func (c *Client) UnmarshalJSON(b []byte) error { } *c = *NewClient(m.URL, m.Insecure) + c.Version = m.Version c.Jar.SetCookies(m.URL, m.Cookies) return nil @@ -452,6 +518,16 @@ func (c *Client) UnmarshalJSON(b []byte) error { type kindContext struct{} +func (c *Client) setInsecureCookies(res *http.Response) { + cookies := res.Cookies() + if len(cookies) != 0 { + for _, cookie := range cookies { + cookie.Secure = false + } + c.Jar.SetCookies(c.u, cookies) + } +} + func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Response) error) error { if ctx == nil { ctx = context.Background() @@ -462,12 +538,16 @@ func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Respons defer d.done() } - if c.UserAgent != "" { - req.Header.Set(`User-Agent`, c.UserAgent) + // use default + if c.UserAgent == "" { + c.UserAgent = defaultUserAgent } + req.Header.Set(`User-Agent`, c.UserAgent) + + ext := "" if d.enabled() { - d.debugRequest(req) + ext = d.debugRequest(req) } tstart := time.Now() @@ -488,12 +568,16 @@ func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Respons return err } - defer res.Body.Close() - if d.enabled() { - d.debugResponse(res) + d.debugResponse(res, ext) } + if c.insecureCookies { + c.setInsecureCookies(res) + } + + defer res.Body.Close() + return f(res) } @@ -511,6 +595,32 @@ func (c *Client) WithHeader(ctx context.Context, header Header) context.Context return context.WithValue(ctx, headerContext{}, header) } +type statusError struct { + res *http.Response +} + +// Temporary returns true for HTTP response codes that can be retried +// See vim25.IsTemporaryNetworkError +func (e *statusError) Temporary() bool { + switch e.res.StatusCode { + case http.StatusBadGateway: + return true + } + return false +} + +func (e *statusError) Error() string { + return e.res.Status +} + +func newStatusError(res *http.Response) error { + return &url.Error{ + Op: res.Request.Method, + URL: res.Request.URL.Path, + Err: &statusError{res}, + } +} + func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error { var err error var b []byte @@ -566,11 +676,11 @@ func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error case http.StatusInternalServerError: // Error, but typically includes a body explaining the error default: - return errors.New(res.Status) + return newStatusError(res) } dec := xml.NewDecoder(res.Body) - dec.TypeFunc = types.TypeFunc() + dec.TypeFunc = c.Types err = dec.Decode(&resEnv) if err != nil { return err @@ -681,7 +791,7 @@ func (c *Client) UploadFile(ctx context.Context, file string, u *url.URL, param return err } - f, err := os.Open(file) + f, err := os.Open(filepath.Clean(file)) if err != nil { return err } @@ -734,7 +844,7 @@ func (c *Client) Download(ctx context.Context, u *url.URL, param *Download) (io. 
switch res.StatusCode { case http.StatusOK: default: - err = errors.New(res.Status) + err = fmt.Errorf("download(%s): %s", u, res.Status) } if err != nil { @@ -758,7 +868,7 @@ func (c *Client) WriteFile(ctx context.Context, file string, src io.Reader, size if s != nil { pr := progress.NewReader(ctx, s, src, size) - src = pr + r = pr // Mark progress reader as done when returning from this function. defer func() { diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/debug.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/debug.go index 844581b99ce6..bc5b902030ae 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/debug.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/debug.go @@ -21,25 +21,19 @@ import ( "io" "net/http" "net/http/httputil" - "strings" "sync/atomic" "time" "github.com/vmware/govmomi/vim25/debug" ) -// teeReader wraps io.TeeReader and patches through the Close() function. -type teeReader struct { - io.Reader - io.Closer -} - -func newTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return teeReader{ - Reader: io.TeeReader(rc, w), - Closer: rc, +var ( + // Trace reads an http request or response from rc and writes to w. + // The content type (kind) should be one of "xml" or "json". + Trace = func(rc io.ReadCloser, w io.Writer, kind string) io.ReadCloser { + return debug.NewTeeReader(rc, w) } -} +) // debugRoundTrip contains state and logic needed to debug a single round trip. type debugRoundTrip struct { @@ -71,50 +65,52 @@ func (d *debugRoundTrip) newFile(suffix string) io.WriteCloser { } func (d *debugRoundTrip) ext(h http.Header) string { + const json = "application/json" ext := "xml" - if strings.Contains(h.Get("Content-Type"), "/json") { + if h.Get("Accept") == json || h.Get("Content-Type") == json { ext = "json" } return ext } -func (d *debugRoundTrip) debugRequest(req *http.Request) { +func (d *debugRoundTrip) debugRequest(req *http.Request) string { if d == nil { - return + return "" } - var wc io.WriteCloser - // Capture headers - wc = d.newFile("req.headers") + var wc io.WriteCloser = d.newFile("req.headers") b, _ := httputil.DumpRequest(req, false) wc.Write(b) wc.Close() + ext := d.ext(req.Header) // Capture body - wc = d.newFile("req." + d.ext(req.Header)) - req.Body = newTeeReader(req.Body, wc) + wc = d.newFile("req." + ext) + if req.Body != nil { + req.Body = Trace(req.Body, wc, ext) + } // Delay closing until marked done d.cs = append(d.cs, wc) + + return ext } -func (d *debugRoundTrip) debugResponse(res *http.Response) { +func (d *debugRoundTrip) debugResponse(res *http.Response, ext string) { if d == nil { return } - var wc io.WriteCloser - // Capture headers - wc = d.newFile("res.headers") + var wc io.WriteCloser = d.newFile("res.headers") b, _ := httputil.DumpResponse(res, false) wc.Write(b) wc.Close() // Capture body - wc = d.newFile("res." + d.ext(res.Header)) - res.Body = newTeeReader(res.Body, wc) + wc = d.newFile("res." 
+ ext) + res.Body = Trace(res.Body, wc, ext) // Delay closing until marked done d.cs = append(d.cs, wc) diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/error.go index 46111556cbd1..1e1508733a52 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/error.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -17,6 +17,7 @@ limitations under the License. package soap import ( + "encoding/json" "fmt" "reflect" @@ -49,6 +50,15 @@ func (s soapFaultError) Error() string { return fmt.Sprintf("%s: %s", s.fault.Code, msg) } +func (s soapFaultError) MarshalJSON() ([]byte, error) { + out := struct { + Fault *Fault + }{ + Fault: s.fault, + } + return json.Marshal(out) +} + type vimFaultError struct { fault types.BaseMethodFault } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/enum.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/enum.go index c8c5977526e9..e8da547b38cd 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/enum.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,8 +200,7 @@ func init() { type CannotEnableVmcpForClusterReason string const ( - CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") - CannotEnableVmcpForClusterReasonIncompatibleHostVersion = CannotEnableVmcpForClusterReason("IncompatibleHostVersion") + CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") ) func init() { @@ -239,6 +238,8 @@ const ( CannotUseNetworkReasonMismatchedNetworkPolicies = CannotUseNetworkReason("MismatchedNetworkPolicies") CannotUseNetworkReasonMismatchedDvsVersionOrVendor = CannotUseNetworkReason("MismatchedDvsVersionOrVendor") CannotUseNetworkReasonVMotionToUnsupportedNetworkType = CannotUseNetworkReason("VMotionToUnsupportedNetworkType") + CannotUseNetworkReasonNetworkUnderMaintenance = CannotUseNetworkReason("NetworkUnderMaintenance") + CannotUseNetworkReasonMismatchedEnsMode = CannotUseNetworkReason("MismatchedEnsMode") ) func init() { @@ -259,6 +260,41 @@ func init() { t["CheckTestType"] = reflect.TypeOf((*CheckTestType)(nil)).Elem() } +type ClusterComputeResourceHCIWorkflowState string + +const ( + ClusterComputeResourceHCIWorkflowStateIn_progress = ClusterComputeResourceHCIWorkflowState("in_progress") + ClusterComputeResourceHCIWorkflowStateDone = ClusterComputeResourceHCIWorkflowState("done") + ClusterComputeResourceHCIWorkflowStateInvalid = ClusterComputeResourceHCIWorkflowState("invalid") +) + +func init() { + t["ClusterComputeResourceHCIWorkflowState"] = reflect.TypeOf((*ClusterComputeResourceHCIWorkflowState)(nil)).Elem() +} + +type ClusterComputeResourceVcsHealthStatus string + +const ( + ClusterComputeResourceVcsHealthStatusHealthy = ClusterComputeResourceVcsHealthStatus("healthy") + ClusterComputeResourceVcsHealthStatusDegraded = ClusterComputeResourceVcsHealthStatus("degraded") + ClusterComputeResourceVcsHealthStatusNonhealthy = ClusterComputeResourceVcsHealthStatus("nonhealthy") +) + +func 
init() { + t["ClusterComputeResourceVcsHealthStatus"] = reflect.TypeOf((*ClusterComputeResourceVcsHealthStatus)(nil)).Elem() +} + +type ClusterCryptoConfigInfoCryptoMode string + +const ( + ClusterCryptoConfigInfoCryptoModeOnDemand = ClusterCryptoConfigInfoCryptoMode("onDemand") + ClusterCryptoConfigInfoCryptoModeForceEnable = ClusterCryptoConfigInfoCryptoMode("forceEnable") +) + +func init() { + t["ClusterCryptoConfigInfoCryptoMode"] = reflect.TypeOf((*ClusterCryptoConfigInfoCryptoMode)(nil)).Elem() +} + type ClusterDasAamNodeStateDasState string const ( @@ -324,6 +360,7 @@ const ( ClusterDasFdmAvailabilityStateInitializationError = ClusterDasFdmAvailabilityState("initializationError") ClusterDasFdmAvailabilityStateUninitializationError = ClusterDasFdmAvailabilityState("uninitializationError") ClusterDasFdmAvailabilityStateFdmUnreachable = ClusterDasFdmAvailabilityState("fdmUnreachable") + ClusterDasFdmAvailabilityStateRetry = ClusterDasFdmAvailabilityState("retry") ) func init() { @@ -494,6 +531,34 @@ func init() { t["ConfigSpecOperation"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem() } +type CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason string + +const ( + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInCache = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateMissingInCache") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterInvalid = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateClusterInvalid") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterUnreachable = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateClusterUnreachable") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInKMS = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateMissingInKMS") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateNotActiveOrEnabled = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateNotActiveOrEnabled") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByTrustAuthority = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateManagedByTrustAuthority") +) + +func init() { + t["CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason)(nil)).Elem() +} + +type CustomizationFailedReasonCode string + +const ( + CustomizationFailedReasonCodeUserDefinedScriptDisabled = CustomizationFailedReasonCode("userDefinedScriptDisabled") + CustomizationFailedReasonCodeCustomizationDisabled = CustomizationFailedReasonCode("customizationDisabled") + CustomizationFailedReasonCodeRawDataIsNotSupported = CustomizationFailedReasonCode("rawDataIsNotSupported") + CustomizationFailedReasonCodeWrongMetadataFormat = CustomizationFailedReasonCode("wrongMetadataFormat") +) + +func init() { + t["CustomizationFailedReasonCode"] = reflect.TypeOf((*CustomizationFailedReasonCode)(nil)).Elem() +} + type CustomizationLicenseDataMode string const ( @@ -576,6 +641,8 @@ const ( DasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed = DasConfigFaultDasConfigFaultReason("CreateConfigVvolFailed") DasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost = DasConfigFaultDasConfigFaultReason("VSanNotSupportedOnHost") DasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("DasNetworkMisconfiguration") + DasConfigFaultDasConfigFaultReasonSetDesiredImageSpecFailed = DasConfigFaultDasConfigFaultReason("SetDesiredImageSpecFailed") + 
DasConfigFaultDasConfigFaultReasonApplyHAVibsOnClusterFailed = DasConfigFaultDasConfigFaultReason("ApplyHAVibsOnClusterFailed") ) func init() { @@ -703,6 +770,17 @@ func init() { t["DisallowedChangeByServiceDisallowedChange"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem() } +type DistributedVirtualPortgroupBackingType string + +const ( + DistributedVirtualPortgroupBackingTypeStandard = DistributedVirtualPortgroupBackingType("standard") + DistributedVirtualPortgroupBackingTypeNsx = DistributedVirtualPortgroupBackingType("nsx") +) + +func init() { + t["DistributedVirtualPortgroupBackingType"] = reflect.TypeOf((*DistributedVirtualPortgroupBackingType)(nil)).Elem() +} + type DistributedVirtualPortgroupMetaTagName string const ( @@ -739,6 +817,8 @@ const ( DistributedVirtualSwitchHostInfrastructureTrafficClassHbr = DistributedVirtualSwitchHostInfrastructureTrafficClass("hbr") DistributedVirtualSwitchHostInfrastructureTrafficClassVsan = DistributedVirtualSwitchHostInfrastructureTrafficClass("vsan") DistributedVirtualSwitchHostInfrastructureTrafficClassVdp = DistributedVirtualSwitchHostInfrastructureTrafficClass("vdp") + DistributedVirtualSwitchHostInfrastructureTrafficClassBackupNfc = DistributedVirtualSwitchHostInfrastructureTrafficClass("backupNfc") + DistributedVirtualSwitchHostInfrastructureTrafficClassNvmetcp = DistributedVirtualSwitchHostInfrastructureTrafficClass("nvmetcp") ) func init() { @@ -760,6 +840,17 @@ func init() { t["DistributedVirtualSwitchHostMemberHostComponentState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem() } +type DistributedVirtualSwitchHostMemberTransportZoneType string + +const ( + DistributedVirtualSwitchHostMemberTransportZoneTypeVlan = DistributedVirtualSwitchHostMemberTransportZoneType("vlan") + DistributedVirtualSwitchHostMemberTransportZoneTypeOverlay = DistributedVirtualSwitchHostMemberTransportZoneType("overlay") +) + +func init() { + t["DistributedVirtualSwitchHostMemberTransportZoneType"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneType)(nil)).Elem() +} + type DistributedVirtualSwitchNetworkResourceControlVersion string const ( @@ -997,6 +1088,17 @@ func init() { t["FileSystemMountInfoVStorageSupportStatus"] = reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem() } +type FolderDesiredHostState string + +const ( + FolderDesiredHostStateMaintenance = FolderDesiredHostState("maintenance") + FolderDesiredHostStateNon_maintenance = FolderDesiredHostState("non_maintenance") +) + +func init() { + t["FolderDesiredHostState"] = reflect.TypeOf((*FolderDesiredHostState)(nil)).Elem() +} + type FtIssuesOnHostHostSelectionType string const ( @@ -1033,6 +1135,20 @@ func init() { t["GuestInfoAppStateType"] = reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem() } +type GuestInfoCustomizationStatus string + +const ( + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_IDLE = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_IDLE") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_PENDING = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_PENDING") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_RUNNING = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_RUNNING") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_SUCCEEDED = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_SUCCEEDED") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_FAILED = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_FAILED") +) + +func init() { + t["GuestInfoCustomizationStatus"] = reflect.TypeOf((*GuestInfoCustomizationStatus)(nil)).Elem() 
+} + type GuestOsDescriptorFirmwareType string const ( @@ -1060,6 +1176,16 @@ func init() { t["GuestOsDescriptorSupportLevel"] = reflect.TypeOf((*GuestOsDescriptorSupportLevel)(nil)).Elem() } +type GuestQuiesceEndGuestQuiesceError string + +const ( + GuestQuiesceEndGuestQuiesceErrorFailure = GuestQuiesceEndGuestQuiesceError("failure") +) + +func init() { + t["GuestQuiesceEndGuestQuiesceError"] = reflect.TypeOf((*GuestQuiesceEndGuestQuiesceError)(nil)).Elem() +} + type GuestRegKeyWowSpec string const ( @@ -1213,6 +1339,7 @@ const ( HostCpuPackageVendorUnknown = HostCpuPackageVendor("unknown") HostCpuPackageVendorIntel = HostCpuPackageVendor("intel") HostCpuPackageVendorAmd = HostCpuPackageVendor("amd") + HostCpuPackageVendorHygon = HostCpuPackageVendor("hygon") ) func init() { @@ -1234,15 +1361,27 @@ func init() { type HostCryptoState string const ( - HostCryptoStateIncapable = HostCryptoState("incapable") - HostCryptoStatePrepared = HostCryptoState("prepared") - HostCryptoStateSafe = HostCryptoState("safe") + HostCryptoStateIncapable = HostCryptoState("incapable") + HostCryptoStatePrepared = HostCryptoState("prepared") + HostCryptoStateSafe = HostCryptoState("safe") + HostCryptoStatePendingIncapable = HostCryptoState("pendingIncapable") ) func init() { t["HostCryptoState"] = reflect.TypeOf((*HostCryptoState)(nil)).Elem() } +type HostDVSConfigSpecSwitchMode string + +const ( + HostDVSConfigSpecSwitchModeNormal = HostDVSConfigSpecSwitchMode("normal") + HostDVSConfigSpecSwitchModeMux = HostDVSConfigSpecSwitchMode("mux") +) + +func init() { + t["HostDVSConfigSpecSwitchMode"] = reflect.TypeOf((*HostDVSConfigSpecSwitchMode)(nil)).Elem() +} + type HostDasErrorEventHostDasErrorReason string const ( @@ -1260,6 +1399,17 @@ func init() { t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() } +type HostDateTimeInfoProtocol string + +const ( + HostDateTimeInfoProtocolNtp = HostDateTimeInfoProtocol("ntp") + HostDateTimeInfoProtocolPtp = HostDateTimeInfoProtocol("ptp") +) + +func init() { + t["HostDateTimeInfoProtocol"] = reflect.TypeOf((*HostDateTimeInfoProtocol)(nil)).Elem() +} + type HostDigestInfoDigestMethodType string const ( @@ -1275,6 +1425,19 @@ func init() { t["HostDigestInfoDigestMethodType"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem() } +type HostDigestVerificationSetting string + +const ( + HostDigestVerificationSettingDigestDisabled = HostDigestVerificationSetting("digestDisabled") + HostDigestVerificationSettingHeaderOnly = HostDigestVerificationSetting("headerOnly") + HostDigestVerificationSettingDataOnly = HostDigestVerificationSetting("dataOnly") + HostDigestVerificationSettingHeaderAndData = HostDigestVerificationSetting("headerAndData") +) + +func init() { + t["HostDigestVerificationSetting"] = reflect.TypeOf((*HostDigestVerificationSetting)(nil)).Elem() +} + type HostDisconnectedEventReasonCode string const ( @@ -1343,6 +1506,7 @@ const ( HostFileSystemVolumeFileSystemTypeVFFS = HostFileSystemVolumeFileSystemType("VFFS") HostFileSystemVolumeFileSystemTypeVVOL = HostFileSystemVolumeFileSystemType("VVOL") HostFileSystemVolumeFileSystemTypePMEM = HostFileSystemVolumeFileSystemType("PMEM") + HostFileSystemVolumeFileSystemTypeVsanD = HostFileSystemVolumeFileSystemType("vsanD") HostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType("OTHER") ) @@ -1383,6 +1547,18 @@ func init() { t["HostFirewallRuleProtocol"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem() } +type HostFruFruType 
string + +const ( + HostFruFruTypeUndefined = HostFruFruType("undefined") + HostFruFruTypeBoard = HostFruFruType("board") + HostFruFruTypeProduct = HostFruFruType("product") +) + +func init() { + t["HostFruFruType"] = reflect.TypeOf((*HostFruFruType)(nil)).Elem() +} + type HostGraphicsConfigGraphicsType string const ( @@ -1632,6 +1808,50 @@ func init() { t["HostLowLevelProvisioningManagerReloadTarget"] = reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem() } +type HostMaintenanceSpecPurpose string + +const ( + HostMaintenanceSpecPurposeHostUpgrade = HostMaintenanceSpecPurpose("hostUpgrade") +) + +func init() { + t["HostMaintenanceSpecPurpose"] = reflect.TypeOf((*HostMaintenanceSpecPurpose)(nil)).Elem() +} + +type HostMemoryTierFlags string + +const ( + HostMemoryTierFlagsMemoryTier = HostMemoryTierFlags("memoryTier") + HostMemoryTierFlagsPersistentTier = HostMemoryTierFlags("persistentTier") + HostMemoryTierFlagsCachingTier = HostMemoryTierFlags("cachingTier") +) + +func init() { + t["HostMemoryTierFlags"] = reflect.TypeOf((*HostMemoryTierFlags)(nil)).Elem() +} + +type HostMemoryTierType string + +const ( + HostMemoryTierTypeDRAM = HostMemoryTierType("DRAM") + HostMemoryTierTypePMem = HostMemoryTierType("PMem") +) + +func init() { + t["HostMemoryTierType"] = reflect.TypeOf((*HostMemoryTierType)(nil)).Elem() +} + +type HostMemoryTieringType string + +const ( + HostMemoryTieringTypeNoTiering = HostMemoryTieringType("noTiering") + HostMemoryTieringTypeHardwareTiering = HostMemoryTieringType("hardwareTiering") +) + +func init() { + t["HostMemoryTieringType"] = reflect.TypeOf((*HostMemoryTieringType)(nil)).Elem() +} + type HostMountInfoInaccessibleReason string const ( @@ -1644,6 +1864,24 @@ func init() { t["HostMountInfoInaccessibleReason"] = reflect.TypeOf((*HostMountInfoInaccessibleReason)(nil)).Elem() } +type HostMountInfoMountFailedReason string + +const ( + HostMountInfoMountFailedReasonCONNECT_FAILURE = HostMountInfoMountFailedReason("CONNECT_FAILURE") + HostMountInfoMountFailedReasonMOUNT_NOT_SUPPORTED = HostMountInfoMountFailedReason("MOUNT_NOT_SUPPORTED") + HostMountInfoMountFailedReasonNFS_NOT_SUPPORTED = HostMountInfoMountFailedReason("NFS_NOT_SUPPORTED") + HostMountInfoMountFailedReasonMOUNT_DENIED = HostMountInfoMountFailedReason("MOUNT_DENIED") + HostMountInfoMountFailedReasonMOUNT_NOT_DIR = HostMountInfoMountFailedReason("MOUNT_NOT_DIR") + HostMountInfoMountFailedReasonVOLUME_LIMIT_EXCEEDED = HostMountInfoMountFailedReason("VOLUME_LIMIT_EXCEEDED") + HostMountInfoMountFailedReasonCONN_LIMIT_EXCEEDED = HostMountInfoMountFailedReason("CONN_LIMIT_EXCEEDED") + HostMountInfoMountFailedReasonMOUNT_EXISTS = HostMountInfoMountFailedReason("MOUNT_EXISTS") + HostMountInfoMountFailedReasonOTHERS = HostMountInfoMountFailedReason("OTHERS") +) + +func init() { + t["HostMountInfoMountFailedReason"] = reflect.TypeOf((*HostMountInfoMountFailedReason)(nil)).Elem() +} + type HostMountMode string const ( @@ -1684,6 +1922,8 @@ const ( HostNetStackInstanceSystemStackKeyDefaultTcpipStack = HostNetStackInstanceSystemStackKey("defaultTcpipStack") HostNetStackInstanceSystemStackKeyVmotion = HostNetStackInstanceSystemStackKey("vmotion") HostNetStackInstanceSystemStackKeyVSphereProvisioning = HostNetStackInstanceSystemStackKey("vSphereProvisioning") + HostNetStackInstanceSystemStackKeyMirror = HostNetStackInstanceSystemStackKey("mirror") + HostNetStackInstanceSystemStackKeyOps = HostNetStackInstanceSystemStackKey("ops") ) func init() { @@ -1725,12 +1965,66 @@ func init() { 
t["HostNumericSensorType"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem() } +type HostNvmeDiscoveryLogSubsystemType string + +const ( + HostNvmeDiscoveryLogSubsystemTypeDiscovery = HostNvmeDiscoveryLogSubsystemType("discovery") + HostNvmeDiscoveryLogSubsystemTypeNvm = HostNvmeDiscoveryLogSubsystemType("nvm") +) + +func init() { + t["HostNvmeDiscoveryLogSubsystemType"] = reflect.TypeOf((*HostNvmeDiscoveryLogSubsystemType)(nil)).Elem() +} + +type HostNvmeDiscoveryLogTransportRequirements string + +const ( + HostNvmeDiscoveryLogTransportRequirementsSecureChannelRequired = HostNvmeDiscoveryLogTransportRequirements("secureChannelRequired") + HostNvmeDiscoveryLogTransportRequirementsSecureChannelNotRequired = HostNvmeDiscoveryLogTransportRequirements("secureChannelNotRequired") + HostNvmeDiscoveryLogTransportRequirementsRequirementsNotSpecified = HostNvmeDiscoveryLogTransportRequirements("requirementsNotSpecified") +) + +func init() { + t["HostNvmeDiscoveryLogTransportRequirements"] = reflect.TypeOf((*HostNvmeDiscoveryLogTransportRequirements)(nil)).Elem() +} + +type HostNvmeTransportParametersNvmeAddressFamily string + +const ( + HostNvmeTransportParametersNvmeAddressFamilyIpv4 = HostNvmeTransportParametersNvmeAddressFamily("ipv4") + HostNvmeTransportParametersNvmeAddressFamilyIpv6 = HostNvmeTransportParametersNvmeAddressFamily("ipv6") + HostNvmeTransportParametersNvmeAddressFamilyInfiniBand = HostNvmeTransportParametersNvmeAddressFamily("infiniBand") + HostNvmeTransportParametersNvmeAddressFamilyFc = HostNvmeTransportParametersNvmeAddressFamily("fc") + HostNvmeTransportParametersNvmeAddressFamilyLoopback = HostNvmeTransportParametersNvmeAddressFamily("loopback") + HostNvmeTransportParametersNvmeAddressFamilyUnknown = HostNvmeTransportParametersNvmeAddressFamily("unknown") +) + +func init() { + t["HostNvmeTransportParametersNvmeAddressFamily"] = reflect.TypeOf((*HostNvmeTransportParametersNvmeAddressFamily)(nil)).Elem() +} + +type HostNvmeTransportType string + +const ( + HostNvmeTransportTypePcie = HostNvmeTransportType("pcie") + HostNvmeTransportTypeFibreChannel = HostNvmeTransportType("fibreChannel") + HostNvmeTransportTypeRdma = HostNvmeTransportType("rdma") + HostNvmeTransportTypeTcp = HostNvmeTransportType("tcp") + HostNvmeTransportTypeLoopback = HostNvmeTransportType("loopback") + HostNvmeTransportTypeUnsupported = HostNvmeTransportType("unsupported") +) + +func init() { + t["HostNvmeTransportType"] = reflect.TypeOf((*HostNvmeTransportType)(nil)).Elem() +} + type HostOpaqueSwitchOpaqueSwitchState string const ( - HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") - HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") - HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") + HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") + HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") + HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") + HostOpaqueSwitchOpaqueSwitchStateMaintenance = HostOpaqueSwitchOpaqueSwitchState("maintenance") ) func init() { @@ -1883,6 +2177,44 @@ func init() { t["HostProtocolEndpointProtocolEndpointType"] = reflect.TypeOf((*HostProtocolEndpointProtocolEndpointType)(nil)).Elem() } +type HostPtpConfigDeviceType string + +const ( + HostPtpConfigDeviceTypeNone = HostPtpConfigDeviceType("none") + HostPtpConfigDeviceTypeVirtualNic = HostPtpConfigDeviceType("virtualNic") + 
HostPtpConfigDeviceTypePciPassthruNic = HostPtpConfigDeviceType("pciPassthruNic") +) + +func init() { + t["HostPtpConfigDeviceType"] = reflect.TypeOf((*HostPtpConfigDeviceType)(nil)).Elem() +} + +type HostQualifiedNameType string + +const ( + HostQualifiedNameTypeNvmeQualifiedName = HostQualifiedNameType("nvmeQualifiedName") + HostQualifiedNameTypeVvolNvmeQualifiedName = HostQualifiedNameType("vvolNvmeQualifiedName") +) + +func init() { + t["HostQualifiedNameType"] = reflect.TypeOf((*HostQualifiedNameType)(nil)).Elem() +} + +type HostRdmaDeviceConnectionState string + +const ( + HostRdmaDeviceConnectionStateUnknown = HostRdmaDeviceConnectionState("unknown") + HostRdmaDeviceConnectionStateDown = HostRdmaDeviceConnectionState("down") + HostRdmaDeviceConnectionStateInit = HostRdmaDeviceConnectionState("init") + HostRdmaDeviceConnectionStateArmed = HostRdmaDeviceConnectionState("armed") + HostRdmaDeviceConnectionStateActive = HostRdmaDeviceConnectionState("active") + HostRdmaDeviceConnectionStateActiveDefer = HostRdmaDeviceConnectionState("activeDefer") +) + +func init() { + t["HostRdmaDeviceConnectionState"] = reflect.TypeOf((*HostRdmaDeviceConnectionState)(nil)).Elem() +} + type HostReplayUnsupportedReason string const ( @@ -1911,6 +2243,29 @@ func init() { t["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem() } +type HostRuntimeInfoStateEncryptionInfoProtectionMode string + +const ( + HostRuntimeInfoStateEncryptionInfoProtectionModeNone = HostRuntimeInfoStateEncryptionInfoProtectionMode("none") + HostRuntimeInfoStateEncryptionInfoProtectionModeTpm = HostRuntimeInfoStateEncryptionInfoProtectionMode("tpm") +) + +func init() { + t["HostRuntimeInfoStateEncryptionInfoProtectionMode"] = reflect.TypeOf((*HostRuntimeInfoStateEncryptionInfoProtectionMode)(nil)).Elem() +} + +type HostRuntimeInfoStatelessNvdsMigrationState string + +const ( + HostRuntimeInfoStatelessNvdsMigrationStateReady = HostRuntimeInfoStatelessNvdsMigrationState("ready") + HostRuntimeInfoStatelessNvdsMigrationStateNotNeeded = HostRuntimeInfoStatelessNvdsMigrationState("notNeeded") + HostRuntimeInfoStatelessNvdsMigrationStateUnknown = HostRuntimeInfoStatelessNvdsMigrationState("unknown") +) + +func init() { + t["HostRuntimeInfoStatelessNvdsMigrationState"] = reflect.TypeOf((*HostRuntimeInfoStatelessNvdsMigrationState)(nil)).Elem() +} + type HostServicePolicy string const ( @@ -1923,6 +2278,70 @@ func init() { t["HostServicePolicy"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem() } +type HostSevInfoSevState string + +const ( + HostSevInfoSevStateUninitialized = HostSevInfoSevState("uninitialized") + HostSevInfoSevStateInitialized = HostSevInfoSevState("initialized") + HostSevInfoSevStateWorking = HostSevInfoSevState("working") +) + +func init() { + t["HostSevInfoSevState"] = reflect.TypeOf((*HostSevInfoSevState)(nil)).Elem() +} + +type HostSgxInfoFlcModes string + +const ( + HostSgxInfoFlcModesOff = HostSgxInfoFlcModes("off") + HostSgxInfoFlcModesLocked = HostSgxInfoFlcModes("locked") + HostSgxInfoFlcModesUnlocked = HostSgxInfoFlcModes("unlocked") +) + +func init() { + t["HostSgxInfoFlcModes"] = reflect.TypeOf((*HostSgxInfoFlcModes)(nil)).Elem() +} + +type HostSgxInfoSgxStates string + +const ( + HostSgxInfoSgxStatesNotPresent = HostSgxInfoSgxStates("notPresent") + HostSgxInfoSgxStatesDisabledBIOS = HostSgxInfoSgxStates("disabledBIOS") + HostSgxInfoSgxStatesDisabledCFW101 = HostSgxInfoSgxStates("disabledCFW101") + 
HostSgxInfoSgxStatesDisabledCPUMismatch = HostSgxInfoSgxStates("disabledCPUMismatch") + HostSgxInfoSgxStatesDisabledNoFLC = HostSgxInfoSgxStates("disabledNoFLC") + HostSgxInfoSgxStatesDisabledNUMAUnsup = HostSgxInfoSgxStates("disabledNUMAUnsup") + HostSgxInfoSgxStatesDisabledMaxEPCRegs = HostSgxInfoSgxStates("disabledMaxEPCRegs") + HostSgxInfoSgxStatesEnabled = HostSgxInfoSgxStates("enabled") +) + +func init() { + t["HostSgxInfoSgxStates"] = reflect.TypeOf((*HostSgxInfoSgxStates)(nil)).Elem() +} + +type HostSgxRegistrationInfoRegistrationStatus string + +const ( + HostSgxRegistrationInfoRegistrationStatusNotApplicable = HostSgxRegistrationInfoRegistrationStatus("notApplicable") + HostSgxRegistrationInfoRegistrationStatusIncomplete = HostSgxRegistrationInfoRegistrationStatus("incomplete") + HostSgxRegistrationInfoRegistrationStatusComplete = HostSgxRegistrationInfoRegistrationStatus("complete") +) + +func init() { + t["HostSgxRegistrationInfoRegistrationStatus"] = reflect.TypeOf((*HostSgxRegistrationInfoRegistrationStatus)(nil)).Elem() +} + +type HostSgxRegistrationInfoRegistrationType string + +const ( + HostSgxRegistrationInfoRegistrationTypeManifest = HostSgxRegistrationInfoRegistrationType("manifest") + HostSgxRegistrationInfoRegistrationTypeAddPackage = HostSgxRegistrationInfoRegistrationType("addPackage") +) + +func init() { + t["HostSgxRegistrationInfoRegistrationType"] = reflect.TypeOf((*HostSgxRegistrationInfoRegistrationType)(nil)).Elem() +} + type HostSnmpAgentCapability string const ( @@ -1948,6 +2367,17 @@ func init() { t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() } +type HostStorageProtocol string + +const ( + HostStorageProtocolScsi = HostStorageProtocol("scsi") + HostStorageProtocolNvme = HostStorageProtocol("nvme") +) + +func init() { + t["HostStorageProtocol"] = reflect.TypeOf((*HostStorageProtocol)(nil)).Elem() +} + type HostSystemConnectionState string const ( @@ -1963,9 +2393,11 @@ func init() { type HostSystemIdentificationInfoIdentifier string const ( - HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") - HostSystemIdentificationInfoIdentifierServiceTag = HostSystemIdentificationInfoIdentifier("ServiceTag") - HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") + HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") + HostSystemIdentificationInfoIdentifierServiceTag = HostSystemIdentificationInfoIdentifier("ServiceTag") + HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") + HostSystemIdentificationInfoIdentifierEnclosureSerialNumberTag = HostSystemIdentificationInfoIdentifier("EnclosureSerialNumberTag") + HostSystemIdentificationInfoIdentifierSerialNumberTag = HostSystemIdentificationInfoIdentifier("SerialNumberTag") ) func init() { @@ -2011,6 +2443,18 @@ func init() { t["HostTpmAttestationInfoAcceptanceStatus"] = reflect.TypeOf((*HostTpmAttestationInfoAcceptanceStatus)(nil)).Elem() } +type HostTrustAuthorityAttestationInfoAttestationStatus string + +const ( + HostTrustAuthorityAttestationInfoAttestationStatusAttested = HostTrustAuthorityAttestationInfoAttestationStatus("attested") + HostTrustAuthorityAttestationInfoAttestationStatusNotAttested = HostTrustAuthorityAttestationInfoAttestationStatus("notAttested") + HostTrustAuthorityAttestationInfoAttestationStatusUnknown = 
HostTrustAuthorityAttestationInfoAttestationStatus("unknown") +) + +func init() { + t["HostTrustAuthorityAttestationInfoAttestationStatus"] = reflect.TypeOf((*HostTrustAuthorityAttestationInfoAttestationStatus)(nil)).Elem() +} + type HostUnresolvedVmfsExtentUnresolvedReason string const ( @@ -2044,6 +2488,10 @@ const ( HostVirtualNicManagerNicTypeVsan = HostVirtualNicManagerNicType("vsan") HostVirtualNicManagerNicTypeVSphereProvisioning = HostVirtualNicManagerNicType("vSphereProvisioning") HostVirtualNicManagerNicTypeVsanWitness = HostVirtualNicManagerNicType("vsanWitness") + HostVirtualNicManagerNicTypeVSphereBackupNFC = HostVirtualNicManagerNicType("vSphereBackupNFC") + HostVirtualNicManagerNicTypePtp = HostVirtualNicManagerNicType("ptp") + HostVirtualNicManagerNicTypeNvmeTcp = HostVirtualNicManagerNicType("nvmeTcp") + HostVirtualNicManagerNicTypeNvmeRdma = HostVirtualNicManagerNicType("nvmeRdma") ) func init() { @@ -2187,6 +2635,7 @@ const ( IoFilterTypeInspection = IoFilterType("inspection") IoFilterTypeDatastoreIoControl = IoFilterType("datastoreIoControl") IoFilterTypeDataProvider = IoFilterType("dataProvider") + IoFilterTypeDataCapture = IoFilterType("dataCapture") ) func init() { @@ -2206,6 +2655,19 @@ func init() { t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() } +type KmipClusterInfoKmsManagementType string + +const ( + KmipClusterInfoKmsManagementTypeUnknown = KmipClusterInfoKmsManagementType("unknown") + KmipClusterInfoKmsManagementTypeVCenter = KmipClusterInfoKmsManagementType("vCenter") + KmipClusterInfoKmsManagementTypeTrustAuthority = KmipClusterInfoKmsManagementType("trustAuthority") + KmipClusterInfoKmsManagementTypeNativeProvider = KmipClusterInfoKmsManagementType("nativeProvider") +) + +func init() { + t["KmipClusterInfoKmsManagementType"] = reflect.TypeOf((*KmipClusterInfoKmsManagementType)(nil)).Elem() +} + type LatencySensitivitySensitivityLevel string const ( @@ -2487,6 +2949,32 @@ func init() { t["NvdimmInterleaveSetState"] = reflect.TypeOf((*NvdimmInterleaveSetState)(nil)).Elem() } +type NvdimmNamespaceDetailsHealthStatus string + +const ( + NvdimmNamespaceDetailsHealthStatusNormal = NvdimmNamespaceDetailsHealthStatus("normal") + NvdimmNamespaceDetailsHealthStatusMissing = NvdimmNamespaceDetailsHealthStatus("missing") + NvdimmNamespaceDetailsHealthStatusLabelMissing = NvdimmNamespaceDetailsHealthStatus("labelMissing") + NvdimmNamespaceDetailsHealthStatusInterleaveBroken = NvdimmNamespaceDetailsHealthStatus("interleaveBroken") + NvdimmNamespaceDetailsHealthStatusLabelInconsistent = NvdimmNamespaceDetailsHealthStatus("labelInconsistent") +) + +func init() { + t["NvdimmNamespaceDetailsHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceDetailsHealthStatus)(nil)).Elem() +} + +type NvdimmNamespaceDetailsState string + +const ( + NvdimmNamespaceDetailsStateInvalid = NvdimmNamespaceDetailsState("invalid") + NvdimmNamespaceDetailsStateNotInUse = NvdimmNamespaceDetailsState("notInUse") + NvdimmNamespaceDetailsStateInUse = NvdimmNamespaceDetailsState("inUse") +) + +func init() { + t["NvdimmNamespaceDetailsState"] = reflect.TypeOf((*NvdimmNamespaceDetailsState)(nil)).Elem() +} + type NvdimmNamespaceHealthStatus string const ( @@ -2651,6 +3139,8 @@ const ( PerformanceManagerUnitWatt = PerformanceManagerUnit("watt") PerformanceManagerUnitJoule = PerformanceManagerUnit("joule") PerformanceManagerUnitTeraBytes = PerformanceManagerUnit("teraBytes") + PerformanceManagerUnitCelsius = PerformanceManagerUnit("celsius") + 
PerformanceManagerUnitNanosecond = PerformanceManagerUnit("nanosecond") ) func init() { @@ -2841,6 +3331,10 @@ const ( RecommendationReasonCodeHostExitDegradation = RecommendationReasonCode("hostExitDegradation") RecommendationReasonCodeMaxVmsConstraint = RecommendationReasonCode("maxVmsConstraint") RecommendationReasonCodeFtConstraints = RecommendationReasonCode("ftConstraints") + RecommendationReasonCodeVmHostAffinityPolicy = RecommendationReasonCode("vmHostAffinityPolicy") + RecommendationReasonCodeVmHostAntiAffinityPolicy = RecommendationReasonCode("vmHostAntiAffinityPolicy") + RecommendationReasonCodeVmAntiAffinityPolicy = RecommendationReasonCode("vmAntiAffinityPolicy") + RecommendationReasonCodeBalanceVsanUsage = RecommendationReasonCode("balanceVsanUsage") ) func init() { @@ -2943,6 +3437,17 @@ func init() { t["ReplicationVmState"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem() } +type ResourceConfigSpecScaleSharesBehavior string + +const ( + ResourceConfigSpecScaleSharesBehaviorDisabled = ResourceConfigSpecScaleSharesBehavior("disabled") + ResourceConfigSpecScaleSharesBehaviorScaleCpuAndMemoryShares = ResourceConfigSpecScaleSharesBehavior("scaleCpuAndMemoryShares") +) + +func init() { + t["ResourceConfigSpecScaleSharesBehavior"] = reflect.TypeOf((*ResourceConfigSpecScaleSharesBehavior)(nil)).Elem() +} + type ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string const ( @@ -3046,6 +3551,18 @@ func init() { t["ScsiLunVStorageSupportStatus"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem() } +type SessionManagerGenericServiceTicketTicketType string + +const ( + SessionManagerGenericServiceTicketTicketTypeHttpNfcServiceTicket = SessionManagerGenericServiceTicketTicketType("HttpNfcServiceTicket") + SessionManagerGenericServiceTicketTicketTypeHostServiceTicket = SessionManagerGenericServiceTicketTicketType("HostServiceTicket") + SessionManagerGenericServiceTicketTicketTypeVcServiceTicket = SessionManagerGenericServiceTicketTicketType("VcServiceTicket") +) + +func init() { + t["SessionManagerGenericServiceTicketTicketType"] = reflect.TypeOf((*SessionManagerGenericServiceTicketTicketType)(nil)).Elem() +} + type SessionManagerHttpServiceRequestSpecMethod string const ( @@ -3430,6 +3947,17 @@ func init() { t["VMwareUplinkLacpMode"] = reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem() } +type VMwareUplinkLacpTimeoutMode string + +const ( + VMwareUplinkLacpTimeoutModeFast = VMwareUplinkLacpTimeoutMode("fast") + VMwareUplinkLacpTimeoutModeSlow = VMwareUplinkLacpTimeoutMode("slow") +) + +func init() { + t["VMwareUplinkLacpTimeoutMode"] = reflect.TypeOf((*VMwareUplinkLacpTimeoutMode)(nil)).Elem() +} + type VStorageObjectConsumptionType string const ( @@ -3526,6 +4054,17 @@ func init() { t["VirtualAppVAppState"] = reflect.TypeOf((*VirtualAppVAppState)(nil)).Elem() } +type VirtualDeviceConfigSpecChangeMode string + +const ( + VirtualDeviceConfigSpecChangeModeFail = VirtualDeviceConfigSpecChangeMode("fail") + VirtualDeviceConfigSpecChangeModeSkip = VirtualDeviceConfigSpecChangeMode("skip") +) + +func init() { + t["VirtualDeviceConfigSpecChangeMode"] = reflect.TypeOf((*VirtualDeviceConfigSpecChangeMode)(nil)).Elem() +} + type VirtualDeviceConfigSpecFileOperation string const ( @@ -3752,6 +4291,17 @@ func init() { t["VirtualEthernetCardMacType"] = reflect.TypeOf((*VirtualEthernetCardMacType)(nil)).Elem() } +type VirtualHardwareMotherboardLayout string + +const ( + VirtualHardwareMotherboardLayoutI440bxHostBridge = VirtualHardwareMotherboardLayout("i440bxHostBridge") + 
VirtualHardwareMotherboardLayoutAcpiHostBridges = VirtualHardwareMotherboardLayout("acpiHostBridges") +) + +func init() { + t["VirtualHardwareMotherboardLayout"] = reflect.TypeOf((*VirtualHardwareMotherboardLayout)(nil)).Elem() +} + type VirtualMachineAppHeartbeatStatusType string const ( @@ -3775,6 +4325,27 @@ func init() { t["VirtualMachineBootOptionsNetworkBootProtocolType"] = reflect.TypeOf((*VirtualMachineBootOptionsNetworkBootProtocolType)(nil)).Elem() } +type VirtualMachineCertThumbprintHashAlgorithm string + +const ( + VirtualMachineCertThumbprintHashAlgorithmSha256 = VirtualMachineCertThumbprintHashAlgorithm("sha256") +) + +func init() { + t["VirtualMachineCertThumbprintHashAlgorithm"] = reflect.TypeOf((*VirtualMachineCertThumbprintHashAlgorithm)(nil)).Elem() +} + +type VirtualMachineCloneSpecTpmProvisionPolicy string + +const ( + VirtualMachineCloneSpecTpmProvisionPolicyCopy = VirtualMachineCloneSpecTpmProvisionPolicy("copy") + VirtualMachineCloneSpecTpmProvisionPolicyReplace = VirtualMachineCloneSpecTpmProvisionPolicy("replace") +) + +func init() { + t["VirtualMachineCloneSpecTpmProvisionPolicy"] = reflect.TypeOf((*VirtualMachineCloneSpecTpmProvisionPolicy)(nil)).Elem() +} + type VirtualMachineConfigInfoNpivWwnType string const ( @@ -3799,6 +4370,18 @@ func init() { t["VirtualMachineConfigInfoSwapPlacementType"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem() } +type VirtualMachineConfigSpecEncryptedFtModes string + +const ( + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionDisabled = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionDisabled") + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionOpportunistic = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionOpportunistic") + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionRequired = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionRequired") +) + +func init() { + t["VirtualMachineConfigSpecEncryptedFtModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedFtModes)(nil)).Elem() +} + type VirtualMachineConfigSpecEncryptedVMotionModes string const ( @@ -3913,29 +4496,31 @@ func init() { type VirtualMachineFileLayoutExFileType string const ( - VirtualMachineFileLayoutExFileTypeConfig = VirtualMachineFileLayoutExFileType("config") - VirtualMachineFileLayoutExFileTypeExtendedConfig = VirtualMachineFileLayoutExFileType("extendedConfig") - VirtualMachineFileLayoutExFileTypeDiskDescriptor = VirtualMachineFileLayoutExFileType("diskDescriptor") - VirtualMachineFileLayoutExFileTypeDiskExtent = VirtualMachineFileLayoutExFileType("diskExtent") - VirtualMachineFileLayoutExFileTypeDigestDescriptor = VirtualMachineFileLayoutExFileType("digestDescriptor") - VirtualMachineFileLayoutExFileTypeDigestExtent = VirtualMachineFileLayoutExFileType("digestExtent") - VirtualMachineFileLayoutExFileTypeDiskReplicationState = VirtualMachineFileLayoutExFileType("diskReplicationState") - VirtualMachineFileLayoutExFileTypeLog = VirtualMachineFileLayoutExFileType("log") - VirtualMachineFileLayoutExFileTypeStat = VirtualMachineFileLayoutExFileType("stat") - VirtualMachineFileLayoutExFileTypeNamespaceData = VirtualMachineFileLayoutExFileType("namespaceData") - VirtualMachineFileLayoutExFileTypeNvram = VirtualMachineFileLayoutExFileType("nvram") - VirtualMachineFileLayoutExFileTypeSnapshotData = VirtualMachineFileLayoutExFileType("snapshotData") - VirtualMachineFileLayoutExFileTypeSnapshotMemory = VirtualMachineFileLayoutExFileType("snapshotMemory") - VirtualMachineFileLayoutExFileTypeSnapshotList = 
VirtualMachineFileLayoutExFileType("snapshotList") - VirtualMachineFileLayoutExFileTypeSnapshotManifestList = VirtualMachineFileLayoutExFileType("snapshotManifestList") - VirtualMachineFileLayoutExFileTypeSuspend = VirtualMachineFileLayoutExFileType("suspend") - VirtualMachineFileLayoutExFileTypeSuspendMemory = VirtualMachineFileLayoutExFileType("suspendMemory") - VirtualMachineFileLayoutExFileTypeSwap = VirtualMachineFileLayoutExFileType("swap") - VirtualMachineFileLayoutExFileTypeUwswap = VirtualMachineFileLayoutExFileType("uwswap") - VirtualMachineFileLayoutExFileTypeCore = VirtualMachineFileLayoutExFileType("core") - VirtualMachineFileLayoutExFileTypeScreenshot = VirtualMachineFileLayoutExFileType("screenshot") - VirtualMachineFileLayoutExFileTypeFtMetadata = VirtualMachineFileLayoutExFileType("ftMetadata") - VirtualMachineFileLayoutExFileTypeGuestCustomization = VirtualMachineFileLayoutExFileType("guestCustomization") + VirtualMachineFileLayoutExFileTypeConfig = VirtualMachineFileLayoutExFileType("config") + VirtualMachineFileLayoutExFileTypeExtendedConfig = VirtualMachineFileLayoutExFileType("extendedConfig") + VirtualMachineFileLayoutExFileTypeDiskDescriptor = VirtualMachineFileLayoutExFileType("diskDescriptor") + VirtualMachineFileLayoutExFileTypeDiskExtent = VirtualMachineFileLayoutExFileType("diskExtent") + VirtualMachineFileLayoutExFileTypeDigestDescriptor = VirtualMachineFileLayoutExFileType("digestDescriptor") + VirtualMachineFileLayoutExFileTypeDigestExtent = VirtualMachineFileLayoutExFileType("digestExtent") + VirtualMachineFileLayoutExFileTypeDiskReplicationState = VirtualMachineFileLayoutExFileType("diskReplicationState") + VirtualMachineFileLayoutExFileTypeLog = VirtualMachineFileLayoutExFileType("log") + VirtualMachineFileLayoutExFileTypeStat = VirtualMachineFileLayoutExFileType("stat") + VirtualMachineFileLayoutExFileTypeNamespaceData = VirtualMachineFileLayoutExFileType("namespaceData") + VirtualMachineFileLayoutExFileTypeDataSetsDiskModeStore = VirtualMachineFileLayoutExFileType("dataSetsDiskModeStore") + VirtualMachineFileLayoutExFileTypeDataSetsVmModeStore = VirtualMachineFileLayoutExFileType("dataSetsVmModeStore") + VirtualMachineFileLayoutExFileTypeNvram = VirtualMachineFileLayoutExFileType("nvram") + VirtualMachineFileLayoutExFileTypeSnapshotData = VirtualMachineFileLayoutExFileType("snapshotData") + VirtualMachineFileLayoutExFileTypeSnapshotMemory = VirtualMachineFileLayoutExFileType("snapshotMemory") + VirtualMachineFileLayoutExFileTypeSnapshotList = VirtualMachineFileLayoutExFileType("snapshotList") + VirtualMachineFileLayoutExFileTypeSnapshotManifestList = VirtualMachineFileLayoutExFileType("snapshotManifestList") + VirtualMachineFileLayoutExFileTypeSuspend = VirtualMachineFileLayoutExFileType("suspend") + VirtualMachineFileLayoutExFileTypeSuspendMemory = VirtualMachineFileLayoutExFileType("suspendMemory") + VirtualMachineFileLayoutExFileTypeSwap = VirtualMachineFileLayoutExFileType("swap") + VirtualMachineFileLayoutExFileTypeUwswap = VirtualMachineFileLayoutExFileType("uwswap") + VirtualMachineFileLayoutExFileTypeCore = VirtualMachineFileLayoutExFileType("core") + VirtualMachineFileLayoutExFileTypeScreenshot = VirtualMachineFileLayoutExFileType("screenshot") + VirtualMachineFileLayoutExFileTypeFtMetadata = VirtualMachineFileLayoutExFileType("ftMetadata") + VirtualMachineFileLayoutExFileTypeGuestCustomization = VirtualMachineFileLayoutExFileType("guestCustomization") ) func init() { @@ -4008,167 +4593,201 @@ func init() { type VirtualMachineGuestOsIdentifier 
string const ( - VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") - VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") - VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") - VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") - VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") - VirtualMachineGuestOsIdentifierWinNTGuest = VirtualMachineGuestOsIdentifier("winNTGuest") - VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") - VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") - VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") - VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") - VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") - VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") - VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") - VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") - VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") - VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") - VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") - VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") - VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") - VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") - VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") - VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") - VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") - VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") - VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") - VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") - VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") - VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") - VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") - VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") - VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") - VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") - VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") - VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") - 
VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") - VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") - VirtualMachineGuestOsIdentifierFreebsd11Guest = VirtualMachineGuestOsIdentifier("freebsd11Guest") - VirtualMachineGuestOsIdentifierFreebsd11_64Guest = VirtualMachineGuestOsIdentifier("freebsd11_64Guest") - VirtualMachineGuestOsIdentifierFreebsd12Guest = VirtualMachineGuestOsIdentifier("freebsd12Guest") - VirtualMachineGuestOsIdentifierFreebsd12_64Guest = VirtualMachineGuestOsIdentifier("freebsd12_64Guest") - VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") - VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") - VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") - VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") - VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") - VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") - VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") - VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") - VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") - VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") - VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") - VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") - VirtualMachineGuestOsIdentifierRhel8_64Guest = VirtualMachineGuestOsIdentifier("rhel8_64Guest") - VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") - VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") - VirtualMachineGuestOsIdentifierCentos6Guest = VirtualMachineGuestOsIdentifier("centos6Guest") - VirtualMachineGuestOsIdentifierCentos6_64Guest = VirtualMachineGuestOsIdentifier("centos6_64Guest") - VirtualMachineGuestOsIdentifierCentos7Guest = VirtualMachineGuestOsIdentifier("centos7Guest") - VirtualMachineGuestOsIdentifierCentos7_64Guest = VirtualMachineGuestOsIdentifier("centos7_64Guest") - VirtualMachineGuestOsIdentifierCentos8_64Guest = VirtualMachineGuestOsIdentifier("centos8_64Guest") - VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") - VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") - VirtualMachineGuestOsIdentifierOracleLinux6Guest = VirtualMachineGuestOsIdentifier("oracleLinux6Guest") - VirtualMachineGuestOsIdentifierOracleLinux6_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux6_64Guest") - VirtualMachineGuestOsIdentifierOracleLinux7Guest = VirtualMachineGuestOsIdentifier("oracleLinux7Guest") - VirtualMachineGuestOsIdentifierOracleLinux7_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux7_64Guest") - VirtualMachineGuestOsIdentifierOracleLinux8_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux8_64Guest") - VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") - VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") - VirtualMachineGuestOsIdentifierSlesGuest = 
VirtualMachineGuestOsIdentifier("slesGuest") - VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") - VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") - VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") - VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") - VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") - VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") - VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") - VirtualMachineGuestOsIdentifierSles15_64Guest = VirtualMachineGuestOsIdentifier("sles15_64Guest") - VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") - VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") - VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") - VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") - VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") - VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") - VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") - VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") - VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") - VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") - VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") - VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") - VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") - VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") - VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") - VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") - VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") - VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") - VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") - VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") - VirtualMachineGuestOsIdentifierDebian9Guest = VirtualMachineGuestOsIdentifier("debian9Guest") - VirtualMachineGuestOsIdentifierDebian9_64Guest = VirtualMachineGuestOsIdentifier("debian9_64Guest") - VirtualMachineGuestOsIdentifierDebian10Guest = VirtualMachineGuestOsIdentifier("debian10Guest") - VirtualMachineGuestOsIdentifierDebian10_64Guest = VirtualMachineGuestOsIdentifier("debian10_64Guest") - VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") - VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") - VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") - VirtualMachineGuestOsIdentifierAsianux4_64Guest = 
VirtualMachineGuestOsIdentifier("asianux4_64Guest") - VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") - VirtualMachineGuestOsIdentifierAsianux7_64Guest = VirtualMachineGuestOsIdentifier("asianux7_64Guest") - VirtualMachineGuestOsIdentifierAsianux8_64Guest = VirtualMachineGuestOsIdentifier("asianux8_64Guest") - VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") - VirtualMachineGuestOsIdentifierOpensuse64Guest = VirtualMachineGuestOsIdentifier("opensuse64Guest") - VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") - VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") - VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") - VirtualMachineGuestOsIdentifierVmwarePhoton64Guest = VirtualMachineGuestOsIdentifier("vmwarePhoton64Guest") - VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") - VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") - VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") - VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") - VirtualMachineGuestOsIdentifierOther4xLinuxGuest = VirtualMachineGuestOsIdentifier("other4xLinuxGuest") - VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") - VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") - VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") - VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") - VirtualMachineGuestOsIdentifierOther4xLinux64Guest = VirtualMachineGuestOsIdentifier("other4xLinux64Guest") - VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") - VirtualMachineGuestOsIdentifierSolaris6Guest = VirtualMachineGuestOsIdentifier("solaris6Guest") - VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") - VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") - VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") - VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") - VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") - VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") - VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") - VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") - VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") - VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") - VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") - VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") - VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") - 
VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") - VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") - VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") - VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") - VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") - VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") - VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") - VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") - VirtualMachineGuestOsIdentifierDarwin12_64Guest = VirtualMachineGuestOsIdentifier("darwin12_64Guest") - VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") - VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") - VirtualMachineGuestOsIdentifierDarwin15_64Guest = VirtualMachineGuestOsIdentifier("darwin15_64Guest") - VirtualMachineGuestOsIdentifierDarwin16_64Guest = VirtualMachineGuestOsIdentifier("darwin16_64Guest") - VirtualMachineGuestOsIdentifierDarwin17_64Guest = VirtualMachineGuestOsIdentifier("darwin17_64Guest") - VirtualMachineGuestOsIdentifierDarwin18_64Guest = VirtualMachineGuestOsIdentifier("darwin18_64Guest") - VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") - VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") - VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") - VirtualMachineGuestOsIdentifierVmkernel65Guest = VirtualMachineGuestOsIdentifier("vmkernel65Guest") - VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") - VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") + VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") + VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") + VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") + VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") + VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") + VirtualMachineGuestOsIdentifierWinNTGuest = VirtualMachineGuestOsIdentifier("winNTGuest") + VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") + VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") + VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") + VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") + VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") + VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") + VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") + VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") + VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = 
VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") + VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") + VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") + VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") + VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") + VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") + VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") + VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") + VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") + VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") + VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") + VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") + VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") + VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") + VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") + VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") + VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") + VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") + VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") + VirtualMachineGuestOsIdentifierWindows11_64Guest = VirtualMachineGuestOsIdentifier("windows11_64Guest") + VirtualMachineGuestOsIdentifierWindows12_64Guest = VirtualMachineGuestOsIdentifier("windows12_64Guest") + VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") + VirtualMachineGuestOsIdentifierWindows2019srv_64Guest = VirtualMachineGuestOsIdentifier("windows2019srv_64Guest") + VirtualMachineGuestOsIdentifierWindows2019srvNext_64Guest = VirtualMachineGuestOsIdentifier("windows2019srvNext_64Guest") + VirtualMachineGuestOsIdentifierWindows2022srvNext_64Guest = VirtualMachineGuestOsIdentifier("windows2022srvNext_64Guest") + VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") + VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") + VirtualMachineGuestOsIdentifierFreebsd11Guest = VirtualMachineGuestOsIdentifier("freebsd11Guest") + VirtualMachineGuestOsIdentifierFreebsd11_64Guest = VirtualMachineGuestOsIdentifier("freebsd11_64Guest") + VirtualMachineGuestOsIdentifierFreebsd12Guest = VirtualMachineGuestOsIdentifier("freebsd12Guest") + VirtualMachineGuestOsIdentifierFreebsd12_64Guest = VirtualMachineGuestOsIdentifier("freebsd12_64Guest") + VirtualMachineGuestOsIdentifierFreebsd13Guest = VirtualMachineGuestOsIdentifier("freebsd13Guest") + VirtualMachineGuestOsIdentifierFreebsd13_64Guest = VirtualMachineGuestOsIdentifier("freebsd13_64Guest") + VirtualMachineGuestOsIdentifierFreebsd14Guest = 
VirtualMachineGuestOsIdentifier("freebsd14Guest") + VirtualMachineGuestOsIdentifierFreebsd14_64Guest = VirtualMachineGuestOsIdentifier("freebsd14_64Guest") + VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") + VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") + VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") + VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") + VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") + VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") + VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") + VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") + VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") + VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") + VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") + VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") + VirtualMachineGuestOsIdentifierRhel8_64Guest = VirtualMachineGuestOsIdentifier("rhel8_64Guest") + VirtualMachineGuestOsIdentifierRhel9_64Guest = VirtualMachineGuestOsIdentifier("rhel9_64Guest") + VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") + VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") + VirtualMachineGuestOsIdentifierCentos6Guest = VirtualMachineGuestOsIdentifier("centos6Guest") + VirtualMachineGuestOsIdentifierCentos6_64Guest = VirtualMachineGuestOsIdentifier("centos6_64Guest") + VirtualMachineGuestOsIdentifierCentos7Guest = VirtualMachineGuestOsIdentifier("centos7Guest") + VirtualMachineGuestOsIdentifierCentos7_64Guest = VirtualMachineGuestOsIdentifier("centos7_64Guest") + VirtualMachineGuestOsIdentifierCentos8_64Guest = VirtualMachineGuestOsIdentifier("centos8_64Guest") + VirtualMachineGuestOsIdentifierCentos9_64Guest = VirtualMachineGuestOsIdentifier("centos9_64Guest") + VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") + VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") + VirtualMachineGuestOsIdentifierOracleLinux6Guest = VirtualMachineGuestOsIdentifier("oracleLinux6Guest") + VirtualMachineGuestOsIdentifierOracleLinux6_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux6_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux7Guest = VirtualMachineGuestOsIdentifier("oracleLinux7Guest") + VirtualMachineGuestOsIdentifierOracleLinux7_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux7_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux8_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux8_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux9_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux9_64Guest") + VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") + VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") + VirtualMachineGuestOsIdentifierSlesGuest = VirtualMachineGuestOsIdentifier("slesGuest") + VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") + 
VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") + VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") + VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") + VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") + VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") + VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") + VirtualMachineGuestOsIdentifierSles15_64Guest = VirtualMachineGuestOsIdentifier("sles15_64Guest") + VirtualMachineGuestOsIdentifierSles16_64Guest = VirtualMachineGuestOsIdentifier("sles16_64Guest") + VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") + VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") + VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") + VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") + VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") + VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") + VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") + VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") + VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") + VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") + VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") + VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") + VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") + VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") + VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") + VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") + VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") + VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") + VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") + VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") + VirtualMachineGuestOsIdentifierDebian9Guest = VirtualMachineGuestOsIdentifier("debian9Guest") + VirtualMachineGuestOsIdentifierDebian9_64Guest = VirtualMachineGuestOsIdentifier("debian9_64Guest") + VirtualMachineGuestOsIdentifierDebian10Guest = VirtualMachineGuestOsIdentifier("debian10Guest") + VirtualMachineGuestOsIdentifierDebian10_64Guest = VirtualMachineGuestOsIdentifier("debian10_64Guest") + VirtualMachineGuestOsIdentifierDebian11Guest = VirtualMachineGuestOsIdentifier("debian11Guest") + VirtualMachineGuestOsIdentifierDebian11_64Guest = VirtualMachineGuestOsIdentifier("debian11_64Guest") + VirtualMachineGuestOsIdentifierDebian12Guest = VirtualMachineGuestOsIdentifier("debian12Guest") + VirtualMachineGuestOsIdentifierDebian12_64Guest = VirtualMachineGuestOsIdentifier("debian12_64Guest") + 
VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") + VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") + VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") + VirtualMachineGuestOsIdentifierAsianux4_64Guest = VirtualMachineGuestOsIdentifier("asianux4_64Guest") + VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") + VirtualMachineGuestOsIdentifierAsianux7_64Guest = VirtualMachineGuestOsIdentifier("asianux7_64Guest") + VirtualMachineGuestOsIdentifierAsianux8_64Guest = VirtualMachineGuestOsIdentifier("asianux8_64Guest") + VirtualMachineGuestOsIdentifierAsianux9_64Guest = VirtualMachineGuestOsIdentifier("asianux9_64Guest") + VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") + VirtualMachineGuestOsIdentifierOpensuse64Guest = VirtualMachineGuestOsIdentifier("opensuse64Guest") + VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") + VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") + VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") + VirtualMachineGuestOsIdentifierVmwarePhoton64Guest = VirtualMachineGuestOsIdentifier("vmwarePhoton64Guest") + VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") + VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") + VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") + VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") + VirtualMachineGuestOsIdentifierOther4xLinuxGuest = VirtualMachineGuestOsIdentifier("other4xLinuxGuest") + VirtualMachineGuestOsIdentifierOther5xLinuxGuest = VirtualMachineGuestOsIdentifier("other5xLinuxGuest") + VirtualMachineGuestOsIdentifierOther6xLinuxGuest = VirtualMachineGuestOsIdentifier("other6xLinuxGuest") + VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") + VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") + VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") + VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") + VirtualMachineGuestOsIdentifierOther4xLinux64Guest = VirtualMachineGuestOsIdentifier("other4xLinux64Guest") + VirtualMachineGuestOsIdentifierOther5xLinux64Guest = VirtualMachineGuestOsIdentifier("other5xLinux64Guest") + VirtualMachineGuestOsIdentifierOther6xLinux64Guest = VirtualMachineGuestOsIdentifier("other6xLinux64Guest") + VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") + VirtualMachineGuestOsIdentifierSolaris6Guest = VirtualMachineGuestOsIdentifier("solaris6Guest") + VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") + VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") + VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") + VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") + 
VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") + VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") + VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") + VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") + VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") + VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") + VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") + VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") + VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") + VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") + VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") + VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") + VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") + VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") + VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") + VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") + VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") + VirtualMachineGuestOsIdentifierDarwin12_64Guest = VirtualMachineGuestOsIdentifier("darwin12_64Guest") + VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") + VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") + VirtualMachineGuestOsIdentifierDarwin15_64Guest = VirtualMachineGuestOsIdentifier("darwin15_64Guest") + VirtualMachineGuestOsIdentifierDarwin16_64Guest = VirtualMachineGuestOsIdentifier("darwin16_64Guest") + VirtualMachineGuestOsIdentifierDarwin17_64Guest = VirtualMachineGuestOsIdentifier("darwin17_64Guest") + VirtualMachineGuestOsIdentifierDarwin18_64Guest = VirtualMachineGuestOsIdentifier("darwin18_64Guest") + VirtualMachineGuestOsIdentifierDarwin19_64Guest = VirtualMachineGuestOsIdentifier("darwin19_64Guest") + VirtualMachineGuestOsIdentifierDarwin20_64Guest = VirtualMachineGuestOsIdentifier("darwin20_64Guest") + VirtualMachineGuestOsIdentifierDarwin21_64Guest = VirtualMachineGuestOsIdentifier("darwin21_64Guest") + VirtualMachineGuestOsIdentifierDarwin22_64Guest = VirtualMachineGuestOsIdentifier("darwin22_64Guest") + VirtualMachineGuestOsIdentifierDarwin23_64Guest = VirtualMachineGuestOsIdentifier("darwin23_64Guest") + VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") + VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") + VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") + VirtualMachineGuestOsIdentifierVmkernel65Guest = VirtualMachineGuestOsIdentifier("vmkernel65Guest") + VirtualMachineGuestOsIdentifierVmkernel7Guest = VirtualMachineGuestOsIdentifier("vmkernel7Guest") + VirtualMachineGuestOsIdentifierVmkernel8Guest = VirtualMachineGuestOsIdentifier("vmkernel8Guest") 
+ VirtualMachineGuestOsIdentifierAmazonlinux2_64Guest = VirtualMachineGuestOsIdentifier("amazonlinux2_64Guest") + VirtualMachineGuestOsIdentifierAmazonlinux3_64Guest = VirtualMachineGuestOsIdentifier("amazonlinux3_64Guest") + VirtualMachineGuestOsIdentifierCrxPod1Guest = VirtualMachineGuestOsIdentifier("crxPod1Guest") + VirtualMachineGuestOsIdentifierRockylinux_64Guest = VirtualMachineGuestOsIdentifier("rockylinux_64Guest") + VirtualMachineGuestOsIdentifierAlmalinux_64Guest = VirtualMachineGuestOsIdentifier("almalinux_64Guest") + VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") + VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") ) func init() { @@ -4358,6 +4977,17 @@ func init() { t["VirtualMachineScsiPassthroughType"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem() } +type VirtualMachineSgxInfoFlcModes string + +const ( + VirtualMachineSgxInfoFlcModesLocked = VirtualMachineSgxInfoFlcModes("locked") + VirtualMachineSgxInfoFlcModesUnlocked = VirtualMachineSgxInfoFlcModes("unlocked") +) + +func init() { + t["VirtualMachineSgxInfoFlcModes"] = reflect.TypeOf((*VirtualMachineSgxInfoFlcModes)(nil)).Elem() +} + type VirtualMachineStandbyActionType string const ( @@ -4383,11 +5013,12 @@ func init() { type VirtualMachineTicketType string const ( - VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") - VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") - VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") - VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") - VirtualMachineTicketTypeGuestIntegrity = VirtualMachineTicketType("guestIntegrity") + VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") + VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") + VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") + VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") + VirtualMachineTicketTypeGuestIntegrity = VirtualMachineTicketType("guestIntegrity") + VirtualMachineTicketTypeWebRemoteDevice = VirtualMachineTicketType("webRemoteDevice") ) func init() { @@ -4482,11 +5113,13 @@ func init() { type VirtualMachineUsbInfoSpeed string const ( - VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") - VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") - VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") - VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") - VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") + VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") + VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") + VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") + VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") + VirtualMachineUsbInfoSpeedSuperSpeedPlus = VirtualMachineUsbInfoSpeed("superSpeedPlus") + VirtualMachineUsbInfoSpeedSuperSpeed20Gbps = VirtualMachineUsbInfoSpeed("superSpeed20Gbps") + VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") ) func init() { @@ -4531,6 +5164,41 @@ func init() { t["VirtualMachineVMCIDeviceProtocol"] = reflect.TypeOf((*VirtualMachineVMCIDeviceProtocol)(nil)).Elem() } +type VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType string + +const ( + 
VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypePciPassthru = VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType("pciPassthru") + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeNvidiaVgpu = VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType("nvidiaVgpu") + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeSriovNic = VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType("sriovNic") + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeDvx = VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType("dvx") +) + +func init() { + t["VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType"] = reflect.TypeOf((*VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType)(nil)).Elem() +} + +type VirtualMachineVgpuProfileInfoProfileClass string + +const ( + VirtualMachineVgpuProfileInfoProfileClassCompute = VirtualMachineVgpuProfileInfoProfileClass("compute") + VirtualMachineVgpuProfileInfoProfileClassQuadro = VirtualMachineVgpuProfileInfoProfileClass("quadro") +) + +func init() { + t["VirtualMachineVgpuProfileInfoProfileClass"] = reflect.TypeOf((*VirtualMachineVgpuProfileInfoProfileClass)(nil)).Elem() +} + +type VirtualMachineVgpuProfileInfoProfileSharing string + +const ( + VirtualMachineVgpuProfileInfoProfileSharingTimeSliced = VirtualMachineVgpuProfileInfoProfileSharing("timeSliced") + VirtualMachineVgpuProfileInfoProfileSharingMig = VirtualMachineVgpuProfileInfoProfileSharing("mig") +) + +func init() { + t["VirtualMachineVgpuProfileInfoProfileSharing"] = reflect.TypeOf((*VirtualMachineVgpuProfileInfoProfileSharing)(nil)).Elem() +} + type VirtualMachineVideoCardUse3dRenderer string const ( @@ -4543,6 +5211,31 @@ func init() { t["VirtualMachineVideoCardUse3dRenderer"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem() } +type VirtualMachineVirtualDeviceSwapDeviceSwapStatus string + +const ( + VirtualMachineVirtualDeviceSwapDeviceSwapStatusNone = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("none") + VirtualMachineVirtualDeviceSwapDeviceSwapStatusScheduled = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("scheduled") + VirtualMachineVirtualDeviceSwapDeviceSwapStatusInprogress = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("inprogress") + VirtualMachineVirtualDeviceSwapDeviceSwapStatusFailed = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("failed") + VirtualMachineVirtualDeviceSwapDeviceSwapStatusCompleted = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("completed") +) + +func init() { + t["VirtualMachineVirtualDeviceSwapDeviceSwapStatus"] = reflect.TypeOf((*VirtualMachineVirtualDeviceSwapDeviceSwapStatus)(nil)).Elem() +} + +type VirtualMachineVirtualPMemSnapshotMode string + +const ( + VirtualMachineVirtualPMemSnapshotModeIndependent_persistent = VirtualMachineVirtualPMemSnapshotMode("independent_persistent") + VirtualMachineVirtualPMemSnapshotModeIndependent_eraseonrevert = VirtualMachineVirtualPMemSnapshotMode("independent_eraseonrevert") +) + +func init() { + t["VirtualMachineVirtualPMemSnapshotMode"] = reflect.TypeOf((*VirtualMachineVirtualPMemSnapshotMode)(nil)).Elem() +} + type VirtualMachineWindowsQuiesceSpecVssBackupContext string const ( @@ -4665,6 +5358,7 @@ const ( VmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware = VmFaultToleranceConfigIssueReasonForIssue("hasEFIFirmware") VmFaultToleranceConfigIssueReasonForIssueTooManyVCPUs = VmFaultToleranceConfigIssueReasonForIssue("tooManyVCPUs") 
VmFaultToleranceConfigIssueReasonForIssueTooMuchMemory = VmFaultToleranceConfigIssueReasonForIssue("tooMuchMemory") + VmFaultToleranceConfigIssueReasonForIssueUnsupportedPMemHAFailOver = VmFaultToleranceConfigIssueReasonForIssue("unsupportedPMemHAFailOver") ) func init() { @@ -4811,6 +5505,35 @@ func init() { t["WillLoseHAProtectionResolution"] = reflect.TypeOf((*WillLoseHAProtectionResolution)(nil)).Elem() } +type VslmDiskInfoFlag string + +const ( + VslmDiskInfoFlagId = VslmDiskInfoFlag("id") + VslmDiskInfoFlagBackingObjectId = VslmDiskInfoFlag("backingObjectId") + VslmDiskInfoFlagPath = VslmDiskInfoFlag("path") + VslmDiskInfoFlagParentPath = VslmDiskInfoFlag("parentPath") + VslmDiskInfoFlagName = VslmDiskInfoFlag("name") + VslmDiskInfoFlagDeviceName = VslmDiskInfoFlag("deviceName") + VslmDiskInfoFlagCapacity = VslmDiskInfoFlag("capacity") + VslmDiskInfoFlagAllocated = VslmDiskInfoFlag("allocated") + VslmDiskInfoFlagType = VslmDiskInfoFlag("type") + VslmDiskInfoFlagConsumers = VslmDiskInfoFlag("consumers") + VslmDiskInfoFlagTentativeState = VslmDiskInfoFlag("tentativeState") + VslmDiskInfoFlagCreateTime = VslmDiskInfoFlag("createTime") + VslmDiskInfoFlagIoFilter = VslmDiskInfoFlag("ioFilter") + VslmDiskInfoFlagControlFlags = VslmDiskInfoFlag("controlFlags") + VslmDiskInfoFlagKeepAfterVmDelete = VslmDiskInfoFlag("keepAfterVmDelete") + VslmDiskInfoFlagRelocationDisabled = VslmDiskInfoFlag("relocationDisabled") + VslmDiskInfoFlagKeyId = VslmDiskInfoFlag("keyId") + VslmDiskInfoFlagKeyProviderId = VslmDiskInfoFlag("keyProviderId") + VslmDiskInfoFlagNativeSnapshotSupported = VslmDiskInfoFlag("nativeSnapshotSupported") + VslmDiskInfoFlagCbtEnabled = VslmDiskInfoFlag("cbtEnabled") +) + +func init() { + t["vslmDiskInfoFlag"] = reflect.TypeOf((*VslmDiskInfoFlag)(nil)).Elem() +} + type VslmVStorageObjectControlFlag string const ( diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/fault.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/fault.go index c2503fa5cb72..813ea9ec60e7 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/fault.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/fault.go @@ -30,3 +30,14 @@ func IsFileNotFound(err error) bool { return false } + +func IsAlreadyExists(err error) bool { + if f, ok := err.(HasFault); ok { + switch f.Fault().(type) { + case *AlreadyExists: + return true + } + } + + return false +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/helpers.go index 95c49f333f38..70360eb4fcae 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/helpers.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/helpers.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ limitations under the License. 
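A minimal usage sketch for the types.IsAlreadyExists helper added to fault.go above; it mirrors the existing IsFileNotFound check. The faultErr type below is a hypothetical stand-in for the fault-carrying errors govmomi returns from SOAP calls (for example task.Error); the only thing IsAlreadyExists relies on is the types.HasFault interface, and the import path follows upstream govmomi rather than this repository's vendored directory layout.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// faultErr is a hypothetical stand-in for the error types govmomi returns;
// all that matters here is that it implements types.HasFault.
type faultErr struct{ fault types.BaseMethodFault }

func (e *faultErr) Error() string                 { return "server fault" }
func (e *faultErr) Fault() types.BaseMethodFault { return e.fault }

func main() {
	var err error = &faultErr{fault: &types.AlreadyExists{}}

	fmt.Println(types.IsAlreadyExists(err)) // true
	fmt.Println(types.IsFileNotFound(err))  // false
}

Callers that create objects idempotently can swallow the error when types.IsAlreadyExists reports true and surface it otherwise, just as existing code does with IsFileNotFound.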
package types import ( + "net/url" "reflect" "strings" "time" @@ -53,7 +54,7 @@ func (r ManagedObjectReference) String() string { func (r *ManagedObjectReference) FromString(o string) bool { s := strings.SplitN(o, ":", 2) - if len(s) < 2 { + if len(s) != 2 { return false } @@ -63,6 +64,11 @@ func (r *ManagedObjectReference) FromString(o string) bool { return true } +// Encode ManagedObjectReference for use with URL and File paths +func (r ManagedObjectReference) Encode() string { + return strings.Join([]string{r.Type, url.QueryEscape(r.Value)}, "-") +} + func (c *PerfCounterInfo) Name() string { return c.GroupInfo.GetElementDescription().Key + "." + c.NameInfo.GetElementDescription().Key + "." + string(c.RollupType) } @@ -88,6 +94,221 @@ func DefaultResourceConfigSpec() ResourceConfigSpec { } } +// ToConfigSpec returns a VirtualMachineConfigSpec based on the +// VirtualMachineConfigInfo. +func (ci VirtualMachineConfigInfo) ToConfigSpec() VirtualMachineConfigSpec { + cs := VirtualMachineConfigSpec{ + ChangeVersion: ci.ChangeVersion, + Name: ci.Name, + Version: ci.Version, + CreateDate: ci.CreateDate, + Uuid: ci.Uuid, + InstanceUuid: ci.InstanceUuid, + NpivNodeWorldWideName: ci.NpivNodeWorldWideName, + NpivPortWorldWideName: ci.NpivPortWorldWideName, + NpivWorldWideNameType: ci.NpivWorldWideNameType, + NpivDesiredNodeWwns: ci.NpivDesiredNodeWwns, + NpivDesiredPortWwns: ci.NpivDesiredPortWwns, + NpivTemporaryDisabled: ci.NpivTemporaryDisabled, + NpivOnNonRdmDisks: ci.NpivOnNonRdmDisks, + LocationId: ci.LocationId, + GuestId: ci.GuestId, + AlternateGuestName: ci.AlternateGuestName, + Annotation: ci.Annotation, + Files: &ci.Files, + Tools: ci.Tools, + Flags: &ci.Flags, + ConsolePreferences: ci.ConsolePreferences, + PowerOpInfo: &ci.DefaultPowerOps, + NumCPUs: ci.Hardware.NumCPU, + VcpuConfig: ci.VcpuConfig, + NumCoresPerSocket: ci.Hardware.NumCoresPerSocket, + MemoryMB: int64(ci.Hardware.MemoryMB), + MemoryHotAddEnabled: ci.MemoryHotAddEnabled, + CpuHotAddEnabled: ci.CpuHotAddEnabled, + CpuHotRemoveEnabled: ci.CpuHotRemoveEnabled, + VirtualICH7MPresent: ci.Hardware.VirtualICH7MPresent, + VirtualSMCPresent: ci.Hardware.VirtualSMCPresent, + DeviceChange: make([]BaseVirtualDeviceConfigSpec, len(ci.Hardware.Device)), + CpuAllocation: ci.CpuAllocation, + MemoryAllocation: ci.MemoryAllocation, + LatencySensitivity: ci.LatencySensitivity, + CpuAffinity: ci.CpuAffinity, + MemoryAffinity: ci.MemoryAffinity, + NetworkShaper: ci.NetworkShaper, + CpuFeatureMask: make([]VirtualMachineCpuIdInfoSpec, len(ci.CpuFeatureMask)), + ExtraConfig: ci.ExtraConfig, + SwapPlacement: ci.SwapPlacement, + BootOptions: ci.BootOptions, + FtInfo: ci.FtInfo, + RepConfig: ci.RepConfig, + VAssertsEnabled: ci.VAssertsEnabled, + ChangeTrackingEnabled: ci.ChangeTrackingEnabled, + Firmware: ci.Firmware, + MaxMksConnections: ci.MaxMksConnections, + GuestAutoLockEnabled: ci.GuestAutoLockEnabled, + ManagedBy: ci.ManagedBy, + MemoryReservationLockedToMax: ci.MemoryReservationLockedToMax, + NestedHVEnabled: ci.NestedHVEnabled, + VPMCEnabled: ci.VPMCEnabled, + MessageBusTunnelEnabled: ci.MessageBusTunnelEnabled, + MigrateEncryption: ci.MigrateEncryption, + FtEncryptionMode: ci.FtEncryptionMode, + SevEnabled: ci.SevEnabled, + PmemFailoverEnabled: ci.PmemFailoverEnabled, + Pmem: ci.Pmem, + NpivWorldWideNameOp: ci.NpivWorldWideNameType, + RebootPowerOff: ci.RebootPowerOff, + ScheduledHardwareUpgradeInfo: ci.ScheduledHardwareUpgradeInfo, + SgxInfo: ci.SgxInfo, + GuestMonitoringModeInfo: ci.GuestMonitoringModeInfo, + VmxStatsCollectionEnabled: 
ci.VmxStatsCollectionEnabled, + VmOpNotificationToAppEnabled: ci.VmOpNotificationToAppEnabled, + VmOpNotificationTimeout: ci.VmOpNotificationTimeout, + DeviceSwap: ci.DeviceSwap, + SimultaneousThreads: ci.Hardware.SimultaneousThreads, + DeviceGroups: ci.DeviceGroups, + MotherboardLayout: ci.Hardware.MotherboardLayout, + } + + // Unassign the Files field if all of its fields are empty. + if ci.Files.FtMetadataDirectory == "" && ci.Files.LogDirectory == "" && + ci.Files.SnapshotDirectory == "" && ci.Files.SuspendDirectory == "" && + ci.Files.VmPathName == "" { + cs.Files = nil + } + + // Unassign the Flags field if all of its fields are empty. + if ci.Flags.CbrcCacheEnabled == nil && + ci.Flags.DisableAcceleration == nil && + ci.Flags.DiskUuidEnabled == nil && + ci.Flags.EnableLogging == nil && + ci.Flags.FaultToleranceType == "" && + ci.Flags.HtSharing == "" && + ci.Flags.MonitorType == "" && + ci.Flags.RecordReplayEnabled == nil && + ci.Flags.RunWithDebugInfo == nil && + ci.Flags.SnapshotDisabled == nil && + ci.Flags.SnapshotLocked == nil && + ci.Flags.SnapshotPowerOffBehavior == "" && + ci.Flags.UseToe == nil && + ci.Flags.VbsEnabled == nil && + ci.Flags.VirtualExecUsage == "" && + ci.Flags.VirtualMmuUsage == "" && + ci.Flags.VvtdEnabled == nil { + cs.Flags = nil + } + + // Unassign the PowerOps field if all of its fields are empty. + if ci.DefaultPowerOps.DefaultPowerOffType == "" && + ci.DefaultPowerOps.DefaultResetType == "" && + ci.DefaultPowerOps.DefaultSuspendType == "" && + ci.DefaultPowerOps.PowerOffType == "" && + ci.DefaultPowerOps.ResetType == "" && + ci.DefaultPowerOps.StandbyAction == "" && + ci.DefaultPowerOps.SuspendType == "" { + cs.PowerOpInfo = nil + } + + for i := 0; i < len(cs.CpuFeatureMask); i++ { + cs.CpuFeatureMask[i] = VirtualMachineCpuIdInfoSpec{ + ArrayUpdateSpec: ArrayUpdateSpec{ + Operation: ArrayUpdateOperationAdd, + }, + Info: &HostCpuIdInfo{ + // TODO: Does DynamicData need to be copied? + // It is an empty struct... + Level: ci.CpuFeatureMask[i].Level, + Vendor: ci.CpuFeatureMask[i].Vendor, + Eax: ci.CpuFeatureMask[i].Eax, + Ebx: ci.CpuFeatureMask[i].Ebx, + Ecx: ci.CpuFeatureMask[i].Ecx, + Edx: ci.CpuFeatureMask[i].Edx, + }, + } + } + + for i := 0; i < len(cs.DeviceChange); i++ { + cs.DeviceChange[i] = &VirtualDeviceConfigSpec{ + // TODO: Does DynamicData need to be copied? + // It is an empty struct... + Operation: VirtualDeviceConfigSpecOperationAdd, + FileOperation: VirtualDeviceConfigSpecFileOperationCreate, + Device: ci.Hardware.Device[i], + // TODO: It is unclear how the profiles associated with the VM or + // its hardware can be reintroduced/persisted in the + // ConfigSpec. + Profile: nil, + // The backing will come from the device. + Backing: nil, + // TODO: Investigate futher. 
+ FilterSpec: nil, + } + } + + if ni := ci.NumaInfo; ni != nil { + cs.VirtualNuma = &VirtualMachineVirtualNuma{ + CoresPerNumaNode: ni.CoresPerNumaNode, + ExposeVnumaOnCpuHotadd: ni.VnumaOnCpuHotaddExposed, + } + } + + if civa, ok := ci.VAppConfig.(*VmConfigInfo); ok { + var csva VmConfigSpec + + csva.Eula = civa.Eula + csva.InstallBootRequired = &civa.InstallBootRequired + csva.InstallBootStopDelay = civa.InstallBootStopDelay + + ipAssignment := civa.IpAssignment + csva.IpAssignment = &ipAssignment + + csva.OvfEnvironmentTransport = civa.OvfEnvironmentTransport + for i := range civa.OvfSection { + s := civa.OvfSection[i] + csva.OvfSection = append( + csva.OvfSection, + VAppOvfSectionSpec{ + ArrayUpdateSpec: ArrayUpdateSpec{ + Operation: ArrayUpdateOperationAdd, + }, + Info: &s, + }, + ) + } + + for i := range civa.Product { + p := civa.Product[i] + csva.Product = append( + csva.Product, + VAppProductSpec{ + ArrayUpdateSpec: ArrayUpdateSpec{ + Operation: ArrayUpdateOperationAdd, + }, + Info: &p, + }, + ) + } + + for i := range civa.Property { + p := civa.Property[i] + csva.Property = append( + csva.Property, + VAppPropertySpec{ + ArrayUpdateSpec: ArrayUpdateSpec{ + Operation: ArrayUpdateOperationAdd, + }, + Info: &p, + }, + ) + } + + cs.VAppConfig = &csva + } + + return cs +} + func init() { // Known 6.5 issue where this event type is sent even though it is internal. // This workaround allows us to unmarshal and avoid NPEs. diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/if.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/if.go index 7576198e639c..03ba1f37d447 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/if.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
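A short sketch of the two exported additions to helpers.go above, assuming nothing beyond the types package itself: ManagedObjectReference.Encode produces a URL/file-path-safe form of a reference, and VirtualMachineConfigInfo.ToConfigSpec copies an existing config into a VirtualMachineConfigSpec. The field values are illustrative only, and the import path follows upstream govmomi rather than the vendored layout.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	ref := types.ManagedObjectReference{Type: "VirtualMachine", Value: "vm-42"}
	fmt.Println(ref.String()) // "VirtualMachine:vm-42" (unchanged format)
	fmt.Println(ref.Encode()) // "VirtualMachine-vm-42"; Value is query-escaped

	// Illustrative config; in practice this would normally come from a
	// property-collector read of a VM's "config" property.
	ci := types.VirtualMachineConfigInfo{
		Name:    "demo-vm",
		GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest64),
		Hardware: types.VirtualHardware{
			NumCPU:   2,
			MemoryMB: 4096,
		},
	}

	cs := ci.ToConfigSpec()
	fmt.Println(cs.Name, cs.NumCPUs, cs.MemoryMB) // demo-vm 2 4096
}

Because ToConfigSpec nils out Files, Flags, and PowerOpInfo when every sub-field is empty, calling it on a sparsely populated config like the one above yields a spec that only carries the values actually set.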
@@ -214,6 +214,18 @@ func init() { t["BaseClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() } +func (b *ClusterComputeResourceValidationResultBase) GetClusterComputeResourceValidationResultBase() *ClusterComputeResourceValidationResultBase { + return b +} + +type BaseClusterComputeResourceValidationResultBase interface { + GetClusterComputeResourceValidationResultBase() *ClusterComputeResourceValidationResultBase +} + +func init() { + t["BaseClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ClusterComputeResourceValidationResultBase)(nil)).Elem() +} + func (b *ClusterDasAdmissionControlInfo) GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo { return b } @@ -786,7 +798,6 @@ func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b } type BaseDvsFilterConfig interface { GetDvsFilterConfig() *DvsFilterConfig - GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig } func init() { @@ -829,21 +840,12 @@ func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQu type BaseDvsNetworkRuleQualifier interface { GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier } func init() { t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() } -func (b *DvsIpNetworkRuleQualifier) GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier { - return b -} - -type BaseDvsIpNetworkRuleQualifier interface { - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier -} - func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b } type BaseDvsTrafficFilterConfig interface { @@ -1254,6 +1256,18 @@ func init() { t["BaseHostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() } +func (b *HostDataTransportConnectionInfo) GetHostDataTransportConnectionInfo() *HostDataTransportConnectionInfo { + return b +} + +type BaseHostDataTransportConnectionInfo interface { + GetHostDataTransportConnectionInfo() *HostDataTransportConnectionInfo +} + +func init() { + t["BaseHostDataTransportConnectionInfo"] = reflect.TypeOf((*HostDataTransportConnectionInfo)(nil)).Elem() +} + func (b *HostDatastoreConnectInfo) GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo { return b } type BaseHostDatastoreConnectInfo interface { @@ -1356,6 +1370,16 @@ func init() { t["BaseHostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() } +func (b *HostHbaCreateSpec) GetHostHbaCreateSpec() *HostHbaCreateSpec { return b } + +type BaseHostHbaCreateSpec interface { + GetHostHbaCreateSpec() *HostHbaCreateSpec +} + +func init() { + t["BaseHostHbaCreateSpec"] = reflect.TypeOf((*HostHbaCreateSpec)(nil)).Elem() +} + func (b *HostHostBusAdapter) GetHostHostBusAdapter() *HostHostBusAdapter { return b } type BaseHostHostBusAdapter interface { @@ -1412,6 +1436,28 @@ func init() { t["BaseHostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() } +func (b *HostNvmeSpec) GetHostNvmeSpec() *HostNvmeSpec { return b } + +type BaseHostNvmeSpec interface { + GetHostNvmeSpec() *HostNvmeSpec +} + +func init() { + t["BaseHostNvmeSpec"] = reflect.TypeOf((*HostNvmeSpec)(nil)).Elem() +} + +func (b *HostNvmeTransportParameters) GetHostNvmeTransportParameters() *HostNvmeTransportParameters { + return b +} + +type BaseHostNvmeTransportParameters interface { + GetHostNvmeTransportParameters() *HostNvmeTransportParameters +} + +func init() { + t["BaseHostNvmeTransportParameters"] = 
reflect.TypeOf((*HostNvmeTransportParameters)(nil)).Elem() +} + func (b *HostPciPassthruConfig) GetHostPciPassthruConfig() *HostPciPassthruConfig { return b } type BaseHostPciPassthruConfig interface { @@ -1464,6 +1510,16 @@ func init() { t["BaseHostProfilesEntityCustomizations"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem() } +func (b *HostRdmaDeviceBacking) GetHostRdmaDeviceBacking() *HostRdmaDeviceBacking { return b } + +type BaseHostRdmaDeviceBacking interface { + GetHostRdmaDeviceBacking() *HostRdmaDeviceBacking +} + +func init() { + t["BaseHostRdmaDeviceBacking"] = reflect.TypeOf((*HostRdmaDeviceBacking)(nil)).Elem() +} + func (b *HostSriovDevicePoolInfo) GetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo { return b } type BaseHostSriovDevicePoolInfo interface { @@ -1496,6 +1552,18 @@ func init() { t["BaseHostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() } +func (b *HostTpmBootSecurityOptionEventDetails) GetHostTpmBootSecurityOptionEventDetails() *HostTpmBootSecurityOptionEventDetails { + return b +} + +type BaseHostTpmBootSecurityOptionEventDetails interface { + GetHostTpmBootSecurityOptionEventDetails() *HostTpmBootSecurityOptionEventDetails +} + +func init() { + t["BaseHostTpmBootSecurityOptionEventDetails"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem() +} + func (b *HostTpmEventDetails) GetHostTpmEventDetails() *HostTpmEventDetails { return b } type BaseHostTpmEventDetails interface { @@ -2966,6 +3034,18 @@ func init() { t["BaseVirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() } +func (b *VirtualMachineBaseIndependentFilterSpec) GetVirtualMachineBaseIndependentFilterSpec() *VirtualMachineBaseIndependentFilterSpec { + return b +} + +type BaseVirtualMachineBaseIndependentFilterSpec interface { + GetVirtualMachineBaseIndependentFilterSpec() *VirtualMachineBaseIndependentFilterSpec +} + +func init() { + t["BaseVirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + func (b *VirtualMachineBootOptionsBootableDevice) GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice { return b } @@ -2978,6 +3058,16 @@ func init() { t["BaseVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() } +func (b *VirtualMachineConnection) GetVirtualMachineConnection() *VirtualMachineConnection { return b } + +type BaseVirtualMachineConnection interface { + GetVirtualMachineConnection() *VirtualMachineConnection +} + +func init() { + t["BaseVirtualMachineConnection"] = reflect.TypeOf((*VirtualMachineConnection)(nil)).Elem() +} + func (b *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState) GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState { return b } @@ -3060,6 +3150,18 @@ func init() { t["BaseVirtualMachineTargetInfo"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem() } +func (b *VirtualMachineVirtualDeviceGroupsDeviceGroup) GetVirtualMachineVirtualDeviceGroupsDeviceGroup() *VirtualMachineVirtualDeviceGroupsDeviceGroup { + return b +} + +type BaseVirtualMachineVirtualDeviceGroupsDeviceGroup interface { + GetVirtualMachineVirtualDeviceGroupsDeviceGroup() *VirtualMachineVirtualDeviceGroupsDeviceGroup +} + +func init() { + t["BaseVirtualMachineVirtualDeviceGroupsDeviceGroup"] = reflect.TypeOf((*VirtualMachineVirtualDeviceGroupsDeviceGroup)(nil)).Elem() +} + func (b 
*VirtualPCIPassthroughPluginBackingInfo) GetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo { return b } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/types.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/types.go index e990e273e222..f7ae195f79f7 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/types.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,6 +21,23 @@ import ( "time" ) +type AbandonHciWorkflow AbandonHciWorkflowRequestType + +func init() { + t["AbandonHciWorkflow"] = reflect.TypeOf((*AbandonHciWorkflow)(nil)).Elem() +} + +type AbandonHciWorkflowRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AbandonHciWorkflowRequestType"] = reflect.TypeOf((*AbandonHciWorkflowRequestType)(nil)).Elem() +} + +type AbandonHciWorkflowResponse struct { +} + type AbdicateDomOwnership AbdicateDomOwnershipRequestType func init() { @@ -40,6 +57,26 @@ type AbdicateDomOwnershipResponse struct { Returnval []string `xml:"returnval,omitempty"` } +type AbortCustomizationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["AbortCustomizationRequestType"] = reflect.TypeOf((*AbortCustomizationRequestType)(nil)).Elem() +} + +type AbortCustomization_Task AbortCustomizationRequestType + +func init() { + t["AbortCustomization_Task"] = reflect.TypeOf((*AbortCustomization_Task)(nil)).Elem() +} + +type AbortCustomization_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type AboutInfo struct { DynamicData @@ -47,6 +84,7 @@ type AboutInfo struct { FullName string `xml:"fullName"` Vendor string `xml:"vendor"` Version string `xml:"version"` + PatchLevel string `xml:"patchLevel,omitempty"` Build string `xml:"build"` LocaleVersion string `xml:"localeVersion,omitempty"` LocaleBuild string `xml:"localeBuild,omitempty"` @@ -1028,6 +1066,7 @@ type AlarmState struct { AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty"` AcknowledgedTime *time.Time `xml:"acknowledgedTime"` EventKey int32 `xml:"eventKey,omitempty"` + Disabled *bool `xml:"disabled"` } func init() { @@ -1709,6 +1748,70 @@ func init() { t["ArrayOfClusterAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterAttemptedVmInfo)(nil)).Elem() } +type ArrayOfClusterComputeResourceDVSSetting struct { + ClusterComputeResourceDVSSetting []ClusterComputeResourceDVSSetting `xml:"ClusterComputeResourceDVSSetting,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDVSSetting"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDVSSetting)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping struct { + ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping []ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping `xml:"ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping)(nil)).Elem() +} 
+ +type ArrayOfClusterComputeResourceDvsProfile struct { + ClusterComputeResourceDvsProfile []ClusterComputeResourceDvsProfile `xml:"ClusterComputeResourceDvsProfile,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDvsProfile"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDvsProfile)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping struct { + ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping []ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping `xml:"ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceHostConfigurationInput struct { + ClusterComputeResourceHostConfigurationInput []ClusterComputeResourceHostConfigurationInput `xml:"ClusterComputeResourceHostConfigurationInput,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostConfigurationInput)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceHostVmkNicInfo struct { + ClusterComputeResourceHostVmkNicInfo []ClusterComputeResourceHostVmkNicInfo `xml:"ClusterComputeResourceHostVmkNicInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceHostVmkNicInfo"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostVmkNicInfo)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceValidationResultBase struct { + ClusterComputeResourceValidationResultBase []BaseClusterComputeResourceValidationResultBase `xml:"ClusterComputeResourceValidationResultBase,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ArrayOfClusterComputeResourceValidationResultBase)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceVcsSlots struct { + ClusterComputeResourceVcsSlots []ClusterComputeResourceVcsSlots `xml:"ClusterComputeResourceVcsSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ArrayOfClusterComputeResourceVcsSlots)(nil)).Elem() +} + type ArrayOfClusterDasAamNodeState struct { ClusterDasAamNodeState []ClusterDasAamNodeState `xml:"ClusterDasAamNodeState,omitempty"` } @@ -1749,6 +1852,14 @@ func init() { t["ArrayOfClusterDasVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigSpec)(nil)).Elem() } +type ArrayOfClusterDatastoreUpdateSpec struct { + ClusterDatastoreUpdateSpec []ClusterDatastoreUpdateSpec `xml:"ClusterDatastoreUpdateSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDatastoreUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterDatastoreUpdateSpec)(nil)).Elem() +} + type ArrayOfClusterDpmHostConfigInfo struct { ClusterDpmHostConfigInfo []ClusterDpmHostConfigInfo `xml:"ClusterDpmHostConfigInfo,omitempty"` } @@ -1893,6 +2004,14 @@ func init() { t["ArrayOfClusterRuleSpec"] = reflect.TypeOf((*ArrayOfClusterRuleSpec)(nil)).Elem() } +type ArrayOfClusterTagCategoryUpdateSpec struct { + ClusterTagCategoryUpdateSpec []ClusterTagCategoryUpdateSpec `xml:"ClusterTagCategoryUpdateSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterTagCategoryUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterTagCategoryUpdateSpec)(nil)).Elem() +} + type ArrayOfClusterVmOrchestrationInfo struct { ClusterVmOrchestrationInfo []ClusterVmOrchestrationInfo `xml:"ClusterVmOrchestrationInfo,omitempty"` } @@ -1989,6 
+2108,14 @@ func init() { t["ArrayOfCryptoManagerKmipClusterStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipClusterStatus)(nil)).Elem() } +type ArrayOfCryptoManagerKmipCryptoKeyStatus struct { + CryptoManagerKmipCryptoKeyStatus []CryptoManagerKmipCryptoKeyStatus `xml:"CryptoManagerKmipCryptoKeyStatus,omitempty"` +} + +func init() { + t["ArrayOfCryptoManagerKmipCryptoKeyStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipCryptoKeyStatus)(nil)).Elem() +} + type ArrayOfCryptoManagerKmipServerStatus struct { CryptoManagerKmipServerStatus []CryptoManagerKmipServerStatus `xml:"CryptoManagerKmipServerStatus,omitempty"` } @@ -2061,6 +2188,14 @@ func init() { t["ArrayOfDVSHealthCheckConfig"] = reflect.TypeOf((*ArrayOfDVSHealthCheckConfig)(nil)).Elem() } +type ArrayOfDVSManagerPhysicalNicsList struct { + DVSManagerPhysicalNicsList []DVSManagerPhysicalNicsList `xml:"DVSManagerPhysicalNicsList,omitempty"` +} + +func init() { + t["ArrayOfDVSManagerPhysicalNicsList"] = reflect.TypeOf((*ArrayOfDVSManagerPhysicalNicsList)(nil)).Elem() +} + type ArrayOfDVSNetworkResourcePool struct { DVSNetworkResourcePool []DVSNetworkResourcePool `xml:"DVSNetworkResourcePool,omitempty"` } @@ -2093,6 +2228,14 @@ func init() { t["ArrayOfDasHeartbeatDatastoreInfo"] = reflect.TypeOf((*ArrayOfDasHeartbeatDatastoreInfo)(nil)).Elem() } +type ArrayOfDatacenterBasicConnectInfo struct { + DatacenterBasicConnectInfo []DatacenterBasicConnectInfo `xml:"DatacenterBasicConnectInfo,omitempty"` +} + +func init() { + t["ArrayOfDatacenterBasicConnectInfo"] = reflect.TypeOf((*ArrayOfDatacenterBasicConnectInfo)(nil)).Elem() +} + type ArrayOfDatacenterMismatchArgument struct { DatacenterMismatchArgument []DatacenterMismatchArgument `xml:"DatacenterMismatchArgument,omitempty"` } @@ -2125,6 +2268,14 @@ func init() { t["ArrayOfDatastoreVVolContainerFailoverPair"] = reflect.TypeOf((*ArrayOfDatastoreVVolContainerFailoverPair)(nil)).Elem() } +type ArrayOfDesiredSoftwareSpecComponentSpec struct { + DesiredSoftwareSpecComponentSpec []DesiredSoftwareSpecComponentSpec `xml:"DesiredSoftwareSpecComponentSpec,omitempty"` +} + +func init() { + t["ArrayOfDesiredSoftwareSpecComponentSpec"] = reflect.TypeOf((*ArrayOfDesiredSoftwareSpecComponentSpec)(nil)).Elem() +} + type ArrayOfDiagnosticManagerBundleInfo struct { DiagnosticManagerBundleInfo []DiagnosticManagerBundleInfo `xml:"DiagnosticManagerBundleInfo,omitempty"` } @@ -2165,6 +2316,14 @@ func init() { t["ArrayOfDistributedVirtualPortgroupInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupInfo)(nil)).Elem() } +type ArrayOfDistributedVirtualPortgroupProblem struct { + DistributedVirtualPortgroupProblem []DistributedVirtualPortgroupProblem `xml:"DistributedVirtualPortgroupProblem,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPortgroupProblem"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupProblem)(nil)).Elem() +} + type ArrayOfDistributedVirtualSwitchHostMember struct { DistributedVirtualSwitchHostMember []DistributedVirtualSwitchHostMember `xml:"DistributedVirtualSwitchHostMember,omitempty"` } @@ -2189,6 +2348,14 @@ func init() { t["ArrayOfDistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() } +type ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo struct { + DistributedVirtualSwitchHostMemberTransportZoneInfo []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"DistributedVirtualSwitchHostMemberTransportZoneInfo,omitempty"` +} + +func init() { + 
t["ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo)(nil)).Elem() +} + type ArrayOfDistributedVirtualSwitchHostProductSpec struct { DistributedVirtualSwitchHostProductSpec []DistributedVirtualSwitchHostProductSpec `xml:"DistributedVirtualSwitchHostProductSpec,omitempty"` } @@ -2229,6 +2396,14 @@ func init() { t["ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() } +type ArrayOfDistributedVirtualSwitchNetworkOffloadSpec struct { + DistributedVirtualSwitchNetworkOffloadSpec []DistributedVirtualSwitchNetworkOffloadSpec `xml:"DistributedVirtualSwitchNetworkOffloadSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchNetworkOffloadSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchNetworkOffloadSpec)(nil)).Elem() +} + type ArrayOfDistributedVirtualSwitchProductSpec struct { DistributedVirtualSwitchProductSpec []DistributedVirtualSwitchProductSpec `xml:"DistributedVirtualSwitchProductSpec,omitempty"` } @@ -2245,6 +2420,22 @@ func init() { t["ArrayOfDouble"] = reflect.TypeOf((*ArrayOfDouble)(nil)).Elem() } +type ArrayOfDpuStatusInfo struct { + DpuStatusInfo []DpuStatusInfo `xml:"DpuStatusInfo,omitempty"` +} + +func init() { + t["ArrayOfDpuStatusInfo"] = reflect.TypeOf((*ArrayOfDpuStatusInfo)(nil)).Elem() +} + +type ArrayOfDpuStatusInfoOperationalInfo struct { + DpuStatusInfoOperationalInfo []DpuStatusInfoOperationalInfo `xml:"DpuStatusInfoOperationalInfo,omitempty"` +} + +func init() { + t["ArrayOfDpuStatusInfoOperationalInfo"] = reflect.TypeOf((*ArrayOfDpuStatusInfoOperationalInfo)(nil)).Elem() +} + type ArrayOfDvsApplyOperationFaultFaultOnObject struct { DvsApplyOperationFaultFaultOnObject []DvsApplyOperationFaultFaultOnObject `xml:"DvsApplyOperationFaultFaultOnObject,omitempty"` } @@ -2557,6 +2748,14 @@ func init() { t["ArrayOfFcoeConfigVlanRange"] = reflect.TypeOf((*ArrayOfFcoeConfigVlanRange)(nil)).Elem() } +type ArrayOfFeatureEVCMode struct { + FeatureEVCMode []FeatureEVCMode `xml:"FeatureEVCMode,omitempty"` +} + +func init() { + t["ArrayOfFeatureEVCMode"] = reflect.TypeOf((*ArrayOfFeatureEVCMode)(nil)).Elem() +} + type ArrayOfFileInfo struct { FileInfo []BaseFileInfo `xml:"FileInfo,omitempty,typeattr"` } @@ -2581,6 +2780,22 @@ func init() { t["ArrayOfFirewallProfileRulesetProfile"] = reflect.TypeOf((*ArrayOfFirewallProfileRulesetProfile)(nil)).Elem() } +type ArrayOfFolderFailedHostResult struct { + FolderFailedHostResult []FolderFailedHostResult `xml:"FolderFailedHostResult,omitempty"` +} + +func init() { + t["ArrayOfFolderFailedHostResult"] = reflect.TypeOf((*ArrayOfFolderFailedHostResult)(nil)).Elem() +} + +type ArrayOfFolderNewHostSpec struct { + FolderNewHostSpec []FolderNewHostSpec `xml:"FolderNewHostSpec,omitempty"` +} + +func init() { + t["ArrayOfFolderNewHostSpec"] = reflect.TypeOf((*ArrayOfFolderNewHostSpec)(nil)).Elem() +} + type ArrayOfGuestAliases struct { GuestAliases []GuestAliases `xml:"GuestAliases,omitempty"` } @@ -2629,6 +2844,14 @@ func init() { t["ArrayOfGuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*ArrayOfGuestInfoNamespaceGenerationInfo)(nil)).Elem() } +type ArrayOfGuestInfoVirtualDiskMapping struct { + GuestInfoVirtualDiskMapping []GuestInfoVirtualDiskMapping `xml:"GuestInfoVirtualDiskMapping,omitempty"` +} + +func init() { + t["ArrayOfGuestInfoVirtualDiskMapping"] = reflect.TypeOf((*ArrayOfGuestInfoVirtualDiskMapping)(nil)).Elem() +} + type 
ArrayOfGuestMappedAliases struct { GuestMappedAliases []GuestMappedAliases `xml:"GuestMappedAliases,omitempty"` } @@ -2733,6 +2956,22 @@ func init() { t["ArrayOfHostActiveDirectory"] = reflect.TypeOf((*ArrayOfHostActiveDirectory)(nil)).Elem() } +type ArrayOfHostAssignableHardwareBinding struct { + HostAssignableHardwareBinding []HostAssignableHardwareBinding `xml:"HostAssignableHardwareBinding,omitempty"` +} + +func init() { + t["ArrayOfHostAssignableHardwareBinding"] = reflect.TypeOf((*ArrayOfHostAssignableHardwareBinding)(nil)).Elem() +} + +type ArrayOfHostAssignableHardwareConfigAttributeOverride struct { + HostAssignableHardwareConfigAttributeOverride []HostAssignableHardwareConfigAttributeOverride `xml:"HostAssignableHardwareConfigAttributeOverride,omitempty"` +} + +func init() { + t["ArrayOfHostAssignableHardwareConfigAttributeOverride"] = reflect.TypeOf((*ArrayOfHostAssignableHardwareConfigAttributeOverride)(nil)).Elem() +} + type ArrayOfHostAuthenticationStoreInfo struct { HostAuthenticationStoreInfo []BaseHostAuthenticationStoreInfo `xml:"HostAuthenticationStoreInfo,omitempty,typeattr"` } @@ -2765,6 +3004,14 @@ func init() { t["ArrayOfHostConnectInfoNetworkInfo"] = reflect.TypeOf((*ArrayOfHostConnectInfoNetworkInfo)(nil)).Elem() } +type ArrayOfHostConnectSpec struct { + HostConnectSpec []HostConnectSpec `xml:"HostConnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostConnectSpec"] = reflect.TypeOf((*ArrayOfHostConnectSpec)(nil)).Elem() +} + type ArrayOfHostCpuIdInfo struct { HostCpuIdInfo []HostCpuIdInfo `xml:"HostCpuIdInfo,omitempty"` } @@ -2885,6 +3132,14 @@ func init() { t["ArrayOfHostDiskPartitionInfo"] = reflect.TypeOf((*ArrayOfHostDiskPartitionInfo)(nil)).Elem() } +type ArrayOfHostDvxClass struct { + HostDvxClass []HostDvxClass `xml:"HostDvxClass,omitempty"` +} + +func init() { + t["ArrayOfHostDvxClass"] = reflect.TypeOf((*ArrayOfHostDvxClass)(nil)).Elem() +} + type ArrayOfHostEventArgument struct { HostEventArgument []HostEventArgument `xml:"HostEventArgument,omitempty"` } @@ -3117,6 +3372,14 @@ func init() { t["ArrayOfHostMemberRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostMemberRuntimeInfo)(nil)).Elem() } +type ArrayOfHostMemoryTierInfo struct { + HostMemoryTierInfo []HostMemoryTierInfo `xml:"HostMemoryTierInfo,omitempty"` +} + +func init() { + t["ArrayOfHostMemoryTierInfo"] = reflect.TypeOf((*ArrayOfHostMemoryTierInfo)(nil)).Elem() +} + type ArrayOfHostMultipathInfoLogicalUnit struct { HostMultipathInfoLogicalUnit []HostMultipathInfoLogicalUnit `xml:"HostMultipathInfoLogicalUnit,omitempty"` } @@ -3205,6 +3468,54 @@ func init() { t["ArrayOfHostNumericSensorInfo"] = reflect.TypeOf((*ArrayOfHostNumericSensorInfo)(nil)).Elem() } +type ArrayOfHostNvmeConnectSpec struct { + HostNvmeConnectSpec []HostNvmeConnectSpec `xml:"HostNvmeConnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeConnectSpec"] = reflect.TypeOf((*ArrayOfHostNvmeConnectSpec)(nil)).Elem() +} + +type ArrayOfHostNvmeController struct { + HostNvmeController []HostNvmeController `xml:"HostNvmeController,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeController"] = reflect.TypeOf((*ArrayOfHostNvmeController)(nil)).Elem() +} + +type ArrayOfHostNvmeDisconnectSpec struct { + HostNvmeDisconnectSpec []HostNvmeDisconnectSpec `xml:"HostNvmeDisconnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeDisconnectSpec"] = reflect.TypeOf((*ArrayOfHostNvmeDisconnectSpec)(nil)).Elem() +} + +type ArrayOfHostNvmeDiscoveryLogEntry struct { + HostNvmeDiscoveryLogEntry []HostNvmeDiscoveryLogEntry 
`xml:"HostNvmeDiscoveryLogEntry,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeDiscoveryLogEntry"] = reflect.TypeOf((*ArrayOfHostNvmeDiscoveryLogEntry)(nil)).Elem() +} + +type ArrayOfHostNvmeNamespace struct { + HostNvmeNamespace []HostNvmeNamespace `xml:"HostNvmeNamespace,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeNamespace"] = reflect.TypeOf((*ArrayOfHostNvmeNamespace)(nil)).Elem() +} + +type ArrayOfHostNvmeTopologyInterface struct { + HostNvmeTopologyInterface []HostNvmeTopologyInterface `xml:"HostNvmeTopologyInterface,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeTopologyInterface"] = reflect.TypeOf((*ArrayOfHostNvmeTopologyInterface)(nil)).Elem() +} + type ArrayOfHostOpaqueNetworkInfo struct { HostOpaqueNetworkInfo []HostOpaqueNetworkInfo `xml:"HostOpaqueNetworkInfo,omitempty"` } @@ -3437,6 +3748,30 @@ func init() { t["ArrayOfHostProxySwitchHostLagConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchHostLagConfig)(nil)).Elem() } +type ArrayOfHostPtpConfigPtpPort struct { + HostPtpConfigPtpPort []HostPtpConfigPtpPort `xml:"HostPtpConfigPtpPort,omitempty"` +} + +func init() { + t["ArrayOfHostPtpConfigPtpPort"] = reflect.TypeOf((*ArrayOfHostPtpConfigPtpPort)(nil)).Elem() +} + +type ArrayOfHostQualifiedName struct { + HostQualifiedName []HostQualifiedName `xml:"HostQualifiedName,omitempty"` +} + +func init() { + t["ArrayOfHostQualifiedName"] = reflect.TypeOf((*ArrayOfHostQualifiedName)(nil)).Elem() +} + +type ArrayOfHostRdmaDevice struct { + HostRdmaDevice []HostRdmaDevice `xml:"HostRdmaDevice,omitempty"` +} + +func init() { + t["ArrayOfHostRdmaDevice"] = reflect.TypeOf((*ArrayOfHostRdmaDevice)(nil)).Elem() +} + type ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo struct { HostRuntimeInfoNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"HostRuntimeInfoNetStackInstanceRuntimeInfo,omitempty"` } @@ -3629,6 +3964,14 @@ func init() { t["ArrayOfHostTpmEventLogEntry"] = reflect.TypeOf((*ArrayOfHostTpmEventLogEntry)(nil)).Elem() } +type ArrayOfHostTrustAuthorityAttestationInfo struct { + HostTrustAuthorityAttestationInfo []HostTrustAuthorityAttestationInfo `xml:"HostTrustAuthorityAttestationInfo,omitempty"` +} + +func init() { + t["ArrayOfHostTrustAuthorityAttestationInfo"] = reflect.TypeOf((*ArrayOfHostTrustAuthorityAttestationInfo)(nil)).Elem() +} + type ArrayOfHostUnresolvedVmfsExtent struct { HostUnresolvedVmfsExtent []HostUnresolvedVmfsExtent `xml:"HostUnresolvedVmfsExtent,omitempty"` } @@ -3797,6 +4140,14 @@ func init() { t["ArrayOfHttpNfcLeaseManifestEntry"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseManifestEntry)(nil)).Elem() } +type ArrayOfHttpNfcLeaseProbeResult struct { + HttpNfcLeaseProbeResult []HttpNfcLeaseProbeResult `xml:"HttpNfcLeaseProbeResult,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseProbeResult"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseProbeResult)(nil)).Elem() +} + type ArrayOfHttpNfcLeaseSourceFile struct { HttpNfcLeaseSourceFile []HttpNfcLeaseSourceFile `xml:"HttpNfcLeaseSourceFile,omitempty"` } @@ -4109,6 +4460,14 @@ func init() { t["ArrayOfNetStackInstanceProfile"] = reflect.TypeOf((*ArrayOfNetStackInstanceProfile)(nil)).Elem() } +type ArrayOfNoPermissionEntityPrivileges struct { + NoPermissionEntityPrivileges []NoPermissionEntityPrivileges `xml:"NoPermissionEntityPrivileges,omitempty"` +} + +func init() { + t["ArrayOfNoPermissionEntityPrivileges"] = reflect.TypeOf((*ArrayOfNoPermissionEntityPrivileges)(nil)).Elem() +} + type ArrayOfNsxHostVNicProfile struct { NsxHostVNicProfile []NsxHostVNicProfile 
`xml:"NsxHostVNicProfile,omitempty"` } @@ -4149,6 +4508,14 @@ func init() { t["ArrayOfNvdimmInterleaveSetInfo"] = reflect.TypeOf((*ArrayOfNvdimmInterleaveSetInfo)(nil)).Elem() } +type ArrayOfNvdimmNamespaceDetails struct { + NvdimmNamespaceDetails []NvdimmNamespaceDetails `xml:"NvdimmNamespaceDetails,omitempty"` +} + +func init() { + t["ArrayOfNvdimmNamespaceDetails"] = reflect.TypeOf((*ArrayOfNvdimmNamespaceDetails)(nil)).Elem() +} + type ArrayOfNvdimmNamespaceInfo struct { NvdimmNamespaceInfo []NvdimmNamespaceInfo `xml:"NvdimmNamespaceInfo,omitempty"` } @@ -4909,6 +5276,14 @@ func init() { t["ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() } +type ArrayOfUri struct { + Uri []string `xml:"uri,omitempty"` +} + +func init() { + t["ArrayOfUri"] = reflect.TypeOf((*ArrayOfUri)(nil)).Elem() +} + type ArrayOfUsbScanCodeSpecKeyEvent struct { UsbScanCodeSpecKeyEvent []UsbScanCodeSpecKeyEvent `xml:"UsbScanCodeSpecKeyEvent,omitempty"` } @@ -4965,6 +5340,14 @@ func init() { t["ArrayOfVASAStorageArray"] = reflect.TypeOf((*ArrayOfVASAStorageArray)(nil)).Elem() } +type ArrayOfVASAStorageArrayDiscoverySvcInfo struct { + VASAStorageArrayDiscoverySvcInfo []VASAStorageArrayDiscoverySvcInfo `xml:"VASAStorageArrayDiscoverySvcInfo,omitempty"` +} + +func init() { + t["ArrayOfVASAStorageArrayDiscoverySvcInfo"] = reflect.TypeOf((*ArrayOfVASAStorageArrayDiscoverySvcInfo)(nil)).Elem() +} + type ArrayOfVAppCloneSpecNetworkMappingPair struct { VAppCloneSpecNetworkMappingPair []VAppCloneSpecNetworkMappingPair `xml:"VAppCloneSpecNetworkMappingPair,omitempty"` } @@ -5221,6 +5604,14 @@ func init() { t["ArrayOfVirtualDiskRuleSpec"] = reflect.TypeOf((*ArrayOfVirtualDiskRuleSpec)(nil)).Elem() } +type ArrayOfVirtualMachineBaseIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"VirtualMachineBaseIndependentFilterSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + type ArrayOfVirtualMachineBootOptionsBootableDevice struct { VirtualMachineBootOptionsBootableDevice []BaseVirtualMachineBootOptionsBootableDevice `xml:"VirtualMachineBootOptionsBootableDevice,omitempty,typeattr"` } @@ -5237,6 +5628,14 @@ func init() { t["ArrayOfVirtualMachineCdromInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineCdromInfo)(nil)).Elem() } +type ArrayOfVirtualMachineCertThumbprint struct { + VirtualMachineCertThumbprint []VirtualMachineCertThumbprint `xml:"VirtualMachineCertThumbprint,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineCertThumbprint"] = reflect.TypeOf((*ArrayOfVirtualMachineCertThumbprint)(nil)).Elem() +} + type ArrayOfVirtualMachineConfigInfoDatastoreUrlPair struct { VirtualMachineConfigInfoDatastoreUrlPair []VirtualMachineConfigInfoDatastoreUrlPair `xml:"VirtualMachineConfigInfoDatastoreUrlPair,omitempty"` } @@ -5253,6 +5652,22 @@ func init() { t["ArrayOfVirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigOptionDescriptor)(nil)).Elem() } +type ArrayOfVirtualMachineConfigSpec struct { + VirtualMachineConfigSpec []VirtualMachineConfigSpec `xml:"VirtualMachineConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineConfigSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineConnection struct { + VirtualMachineConnection 
[]BaseVirtualMachineConnection `xml:"VirtualMachineConnection,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineConnection"] = reflect.TypeOf((*ArrayOfVirtualMachineConnection)(nil)).Elem() +} + type ArrayOfVirtualMachineCpuIdInfoSpec struct { VirtualMachineCpuIdInfoSpec []VirtualMachineCpuIdInfoSpec `xml:"VirtualMachineCpuIdInfoSpec,omitempty"` } @@ -5293,6 +5708,22 @@ func init() { t["ArrayOfVirtualMachineDisplayTopology"] = reflect.TypeOf((*ArrayOfVirtualMachineDisplayTopology)(nil)).Elem() } +type ArrayOfVirtualMachineDvxClassInfo struct { + VirtualMachineDvxClassInfo []VirtualMachineDvxClassInfo `xml:"VirtualMachineDvxClassInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDvxClassInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDvxClassInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineDynamicPassthroughInfo struct { + VirtualMachineDynamicPassthroughInfo []VirtualMachineDynamicPassthroughInfo `xml:"VirtualMachineDynamicPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDynamicPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDynamicPassthroughInfo)(nil)).Elem() +} + type ArrayOfVirtualMachineFeatureRequirement struct { VirtualMachineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"VirtualMachineFeatureRequirement,omitempty"` } @@ -5437,6 +5868,14 @@ func init() { t["ArrayOfVirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() } +type ArrayOfVirtualMachinePrecisionClockInfo struct { + VirtualMachinePrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"VirtualMachinePrecisionClockInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachinePrecisionClockInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePrecisionClockInfo)(nil)).Elem() +} + type ArrayOfVirtualMachineProfileDetailsDiskProfileDetails struct { VirtualMachineProfileDetailsDiskProfileDetails []VirtualMachineProfileDetailsDiskProfileDetails `xml:"VirtualMachineProfileDetailsDiskProfileDetails,omitempty"` } @@ -5461,6 +5900,14 @@ func init() { t["ArrayOfVirtualMachinePropertyRelation"] = reflect.TypeOf((*ArrayOfVirtualMachinePropertyRelation)(nil)).Elem() } +type ArrayOfVirtualMachineQuickStatsMemoryTierStats struct { + VirtualMachineQuickStatsMemoryTierStats []VirtualMachineQuickStatsMemoryTierStats `xml:"VirtualMachineQuickStatsMemoryTierStats,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*ArrayOfVirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() +} + type ArrayOfVirtualMachineRelocateSpecDiskLocator struct { VirtualMachineRelocateSpecDiskLocator []VirtualMachineRelocateSpecDiskLocator `xml:"VirtualMachineRelocateSpecDiskLocator,omitempty"` } @@ -5557,6 +6004,54 @@ func init() { t["ArrayOfVirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() } +type ArrayOfVirtualMachineVcpuConfig struct { + VirtualMachineVcpuConfig []VirtualMachineVcpuConfig `xml:"VirtualMachineVcpuConfig,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVcpuConfig"] = reflect.TypeOf((*ArrayOfVirtualMachineVcpuConfig)(nil)).Elem() +} + +type ArrayOfVirtualMachineVendorDeviceGroupInfo struct { + VirtualMachineVendorDeviceGroupInfo []VirtualMachineVendorDeviceGroupInfo `xml:"VirtualMachineVendorDeviceGroupInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVendorDeviceGroupInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVendorDeviceGroupInfo)(nil)).Elem() +} + +type 
ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo struct { + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo []VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo `xml:"VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVgpuDeviceInfo struct { + VirtualMachineVgpuDeviceInfo []VirtualMachineVgpuDeviceInfo `xml:"VirtualMachineVgpuDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVgpuDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVgpuDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVgpuProfileInfo struct { + VirtualMachineVgpuProfileInfo []VirtualMachineVgpuProfileInfo `xml:"VirtualMachineVgpuProfileInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVgpuProfileInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVgpuProfileInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVirtualDeviceGroupsDeviceGroup struct { + VirtualMachineVirtualDeviceGroupsDeviceGroup []BaseVirtualMachineVirtualDeviceGroupsDeviceGroup `xml:"VirtualMachineVirtualDeviceGroupsDeviceGroup,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineVirtualDeviceGroupsDeviceGroup"] = reflect.TypeOf((*ArrayOfVirtualMachineVirtualDeviceGroupsDeviceGroup)(nil)).Elem() +} + type ArrayOfVirtualNicManagerNetConfig struct { VirtualNicManagerNetConfig []VirtualNicManagerNetConfig `xml:"VirtualNicManagerNetConfig,omitempty"` } @@ -5565,6 +6060,14 @@ func init() { t["ArrayOfVirtualNicManagerNetConfig"] = reflect.TypeOf((*ArrayOfVirtualNicManagerNetConfig)(nil)).Elem() } +type ArrayOfVirtualPCIPassthroughAllowedDevice struct { + VirtualPCIPassthroughAllowedDevice []VirtualPCIPassthroughAllowedDevice `xml:"VirtualPCIPassthroughAllowedDevice,omitempty"` +} + +func init() { + t["ArrayOfVirtualPCIPassthroughAllowedDevice"] = reflect.TypeOf((*ArrayOfVirtualPCIPassthroughAllowedDevice)(nil)).Elem() +} + type ArrayOfVirtualSCSISharing struct { VirtualSCSISharing []VirtualSCSISharing `xml:"VirtualSCSISharing,omitempty"` } @@ -6112,6 +6615,8 @@ type BaseConfigInfo struct { NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported"` ChangedBlockTrackingEnabled *bool `xml:"changedBlockTrackingEnabled"` Backing BaseBaseConfigInfoBackingInfo `xml:"backing,typeattr"` + Metadata []KeyValue `xml:"metadata,omitempty"` + Vclock *VslmVClockInfo `xml:"vclock,omitempty"` Iofilter []string `xml:"iofilter,omitempty"` } @@ -6146,6 +6651,7 @@ type BaseConfigInfoFileBackingInfo struct { BackingObjectId string `xml:"backingObjectId,omitempty"` Parent BaseBaseConfigInfoFileBackingInfo `xml:"parent,omitempty,typeattr"` DeltaSizeInMB int64 `xml:"deltaSizeInMB,omitempty"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` } func init() { @@ -6163,6 +6669,69 @@ func init() { t["BaseConfigInfoRawDiskMappingBackingInfo"] = reflect.TypeOf((*BaseConfigInfoRawDiskMappingBackingInfo)(nil)).Elem() } +type BatchAddHostsToClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + NewHosts []FolderNewHostSpec `xml:"newHosts,omitempty"` + ExistingHosts []ManagedObjectReference `xml:"existingHosts,omitempty"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + DesiredState string `xml:"desiredState,omitempty"` +} + +func init() { + t["BatchAddHostsToClusterRequestType"] = 
reflect.TypeOf((*BatchAddHostsToClusterRequestType)(nil)).Elem() +} + +type BatchAddHostsToCluster_Task BatchAddHostsToClusterRequestType + +func init() { + t["BatchAddHostsToCluster_Task"] = reflect.TypeOf((*BatchAddHostsToCluster_Task)(nil)).Elem() +} + +type BatchAddHostsToCluster_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type BatchAddStandaloneHostsRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewHosts []FolderNewHostSpec `xml:"newHosts,omitempty"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + AddConnected bool `xml:"addConnected"` +} + +func init() { + t["BatchAddStandaloneHostsRequestType"] = reflect.TypeOf((*BatchAddStandaloneHostsRequestType)(nil)).Elem() +} + +type BatchAddStandaloneHosts_Task BatchAddStandaloneHostsRequestType + +func init() { + t["BatchAddStandaloneHosts_Task"] = reflect.TypeOf((*BatchAddStandaloneHosts_Task)(nil)).Elem() +} + +type BatchAddStandaloneHosts_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type BatchQueryConnectInfo BatchQueryConnectInfoRequestType + +func init() { + t["BatchQueryConnectInfo"] = reflect.TypeOf((*BatchQueryConnectInfo)(nil)).Elem() +} + +type BatchQueryConnectInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostSpecs []HostConnectSpec `xml:"hostSpecs,omitempty"` +} + +func init() { + t["BatchQueryConnectInfoRequestType"] = reflect.TypeOf((*BatchQueryConnectInfoRequestType)(nil)).Elem() +} + +type BatchQueryConnectInfoResponse struct { + Returnval []DatacenterBasicConnectInfo `xml:"returnval,omitempty"` +} + type BatchResult struct { DynamicData @@ -6873,12 +7442,21 @@ func init() { type Capability struct { DynamicData - ProvisioningSupported bool `xml:"provisioningSupported"` - MultiHostSupported bool `xml:"multiHostSupported"` - UserShellAccessSupported bool `xml:"userShellAccessSupported"` - SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` - NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` - FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported"` + ProvisioningSupported bool `xml:"provisioningSupported"` + MultiHostSupported bool `xml:"multiHostSupported"` + UserShellAccessSupported bool `xml:"userShellAccessSupported"` + SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` + SupportedEVCGraphicsMode []FeatureEVCMode `xml:"supportedEVCGraphicsMode,omitempty"` + NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` + FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported"` + HciWorkflowSupported *bool `xml:"hciWorkflowSupported"` + ComputePolicyVersion int32 `xml:"computePolicyVersion,omitempty"` + ClusterPlacementSupported *bool `xml:"clusterPlacementSupported"` + LifecycleManagementSupported *bool `xml:"lifecycleManagementSupported"` + HostSeedingSupported *bool `xml:"hostSeedingSupported"` + ScalableSharesSupported *bool `xml:"scalableSharesSupported"` + HadcsSupported *bool `xml:"hadcsSupported"` + ConfigMgmtSupported *bool `xml:"configMgmtSupported"` } func init() { @@ -7058,6 +7636,26 @@ func init() { type ChangeOwnerResponse struct { } +type ChangePassword ChangePasswordRequestType + +func init() { + t["ChangePassword"] = reflect.TypeOf((*ChangePassword)(nil)).Elem() +} + +type ChangePasswordRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + OldPassword string `xml:"oldPassword"` + NewPassword string `xml:"newPassword"` +} + +func init() { 
+ t["ChangePasswordRequestType"] = reflect.TypeOf((*ChangePasswordRequestType)(nil)).Elem() +} + +type ChangePasswordResponse struct { +} + type ChangesInfoEventArgument struct { DynamicData @@ -7174,8 +7772,9 @@ type CheckCompliance_TaskResponse struct { } type CheckConfigureEvcModeRequestType struct { - This ManagedObjectReference `xml:"_this"` - EvcModeKey string `xml:"evcModeKey"` + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` } func init() { @@ -7714,6 +8313,18 @@ func init() { t["ClusterAttemptedVmInfo"] = reflect.TypeOf((*ClusterAttemptedVmInfo)(nil)).Elem() } +type ClusterClusterInitialPlacementAction struct { + ClusterAction + + TargetHost *ManagedObjectReference `xml:"targetHost,omitempty"` + Pool ManagedObjectReference `xml:"pool"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` +} + +func init() { + t["ClusterClusterInitialPlacementAction"] = reflect.TypeOf((*ClusterClusterInitialPlacementAction)(nil)).Elem() +} + type ClusterComplianceCheckedEvent struct { ClusterEvent @@ -7724,23 +8335,208 @@ func init() { t["ClusterComplianceCheckedEvent"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem() } +type ClusterComputeResourceClusterConfigResult struct { + DynamicData + + FailedHosts []FolderFailedHostResult `xml:"failedHosts,omitempty"` + ConfiguredHosts []ManagedObjectReference `xml:"configuredHosts,omitempty"` +} + +func init() { + t["ClusterComputeResourceClusterConfigResult"] = reflect.TypeOf((*ClusterComputeResourceClusterConfigResult)(nil)).Elem() +} + +type ClusterComputeResourceDVSConfigurationValidation struct { + ClusterComputeResourceValidationResultBase + + IsDvsValid bool `xml:"isDvsValid"` + IsDvpgValid bool `xml:"isDvpgValid"` +} + +func init() { + t["ClusterComputeResourceDVSConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceDVSConfigurationValidation)(nil)).Elem() +} + +type ClusterComputeResourceDVSSetting struct { + DynamicData + + DvSwitch ManagedObjectReference `xml:"dvSwitch"` + PnicDevices []string `xml:"pnicDevices,omitempty"` + DvPortgroupSetting []ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping `xml:"dvPortgroupSetting,omitempty"` +} + +func init() { + t["ClusterComputeResourceDVSSetting"] = reflect.TypeOf((*ClusterComputeResourceDVSSetting)(nil)).Elem() +} + +type ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping struct { + DynamicData + + DvPortgroup ManagedObjectReference `xml:"dvPortgroup"` + Service string `xml:"service"` +} + +func init() { + t["ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping"] = reflect.TypeOf((*ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping)(nil)).Elem() +} + +type ClusterComputeResourceDvsProfile struct { + DynamicData + + DvsName string `xml:"dvsName,omitempty"` + DvSwitch *ManagedObjectReference `xml:"dvSwitch,omitempty"` + PnicDevices []string `xml:"pnicDevices,omitempty"` + DvPortgroupMapping []ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping `xml:"dvPortgroupMapping,omitempty"` +} + +func init() { + t["ClusterComputeResourceDvsProfile"] = reflect.TypeOf((*ClusterComputeResourceDvsProfile)(nil)).Elem() +} + +type ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping struct { + DynamicData + + DvPortgroupSpec *DVPortgroupConfigSpec `xml:"dvPortgroupSpec,omitempty"` + DvPortgroup *ManagedObjectReference `xml:"dvPortgroup,omitempty"` + Service string `xml:"service"` +} + +func init() { + 
t["ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping"] = reflect.TypeOf((*ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping)(nil)).Elem() +} + +type ClusterComputeResourceHCIConfigInfo struct { + DynamicData + + WorkflowState string `xml:"workflowState"` + DvsSetting []ClusterComputeResourceDVSSetting `xml:"dvsSetting,omitempty"` + ConfiguredHosts []ManagedObjectReference `xml:"configuredHosts,omitempty"` + HostConfigProfile *ClusterComputeResourceHostConfigurationProfile `xml:"hostConfigProfile,omitempty"` +} + +func init() { + t["ClusterComputeResourceHCIConfigInfo"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigInfo)(nil)).Elem() +} + +type ClusterComputeResourceHCIConfigSpec struct { + DynamicData + + DvsProf []ClusterComputeResourceDvsProfile `xml:"dvsProf,omitempty"` + HostConfigProfile *ClusterComputeResourceHostConfigurationProfile `xml:"hostConfigProfile,omitempty"` + VSanConfigSpec *SDDCBase `xml:"vSanConfigSpec,omitempty"` + VcProf *ClusterComputeResourceVCProfile `xml:"vcProf,omitempty"` +} + +func init() { + t["ClusterComputeResourceHCIConfigSpec"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigSpec)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationInput struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + HostVmkNics []ClusterComputeResourceHostVmkNicInfo `xml:"hostVmkNics,omitempty"` + AllowedInNonMaintenanceMode *bool `xml:"allowedInNonMaintenanceMode"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationInput)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationProfile struct { + DynamicData + + DateTimeConfig *HostDateTimeConfig `xml:"dateTimeConfig,omitempty"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationProfile"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationProfile)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationValidation struct { + ClusterComputeResourceValidationResultBase + + Host ManagedObjectReference `xml:"host"` + IsDvsSettingValid *bool `xml:"isDvsSettingValid"` + IsVmknicSettingValid *bool `xml:"isVmknicSettingValid"` + IsNtpSettingValid *bool `xml:"isNtpSettingValid"` + IsLockdownModeValid *bool `xml:"isLockdownModeValid"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationValidation)(nil)).Elem() +} + +type ClusterComputeResourceHostVmkNicInfo struct { + DynamicData + + NicSpec HostVirtualNicSpec `xml:"nicSpec"` + Service string `xml:"service"` +} + +func init() { + t["ClusterComputeResourceHostVmkNicInfo"] = reflect.TypeOf((*ClusterComputeResourceHostVmkNicInfo)(nil)).Elem() +} + type ClusterComputeResourceSummary struct { ComputeResourceSummary - CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` - AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` - NumVmotions int32 `xml:"numVmotions"` - TargetBalance int32 `xml:"targetBalance,omitempty"` - CurrentBalance int32 `xml:"currentBalance,omitempty"` - UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` - DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` + CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` + AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` + 
NumVmotions int32 `xml:"numVmotions"` + TargetBalance int32 `xml:"targetBalance,omitempty"` + CurrentBalance int32 `xml:"currentBalance,omitempty"` + DrsScore int32 `xml:"drsScore,omitempty"` + NumVmsPerDrsScoreBucket []int32 `xml:"numVmsPerDrsScoreBucket,omitempty"` + UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty"` + DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` + ClusterMaintenanceModeStatus string `xml:"clusterMaintenanceModeStatus,omitempty"` + VcsHealthStatus string `xml:"vcsHealthStatus,omitempty"` + VcsSlots []ClusterComputeResourceVcsSlots `xml:"vcsSlots,omitempty"` } func init() { t["ClusterComputeResourceSummary"] = reflect.TypeOf((*ClusterComputeResourceSummary)(nil)).Elem() } +type ClusterComputeResourceVCProfile struct { + DynamicData + + ClusterSpec *ClusterConfigSpecEx `xml:"clusterSpec,omitempty"` + EvcModeKey string `xml:"evcModeKey,omitempty"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` +} + +func init() { + t["ClusterComputeResourceVCProfile"] = reflect.TypeOf((*ClusterComputeResourceVCProfile)(nil)).Elem() +} + +type ClusterComputeResourceValidationResultBase struct { + DynamicData + + Info []LocalizableMessage `xml:"info,omitempty"` +} + +func init() { + t["ClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ClusterComputeResourceValidationResultBase)(nil)).Elem() +} + +type ClusterComputeResourceVcsSlots struct { + DynamicData + + SystemId string `xml:"systemId,omitempty"` + Host ManagedObjectReference `xml:"host"` + Datastore []ManagedObjectReference `xml:"datastore,omitempty"` + TotalSlots int32 `xml:"totalSlots"` +} + +func init() { + t["ClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ClusterComputeResourceVcsSlots)(nil)).Elem() +} + type ClusterConfigInfo struct { DynamicData @@ -7758,6 +8554,7 @@ func init() { type ClusterConfigInfoEx struct { ComputeResourceConfigInfo + SystemVMsConfig *ClusterSystemVMsConfigInfo `xml:"systemVMsConfig,omitempty"` DasConfig ClusterDasConfigInfo `xml:"dasConfig"` DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` @@ -7772,6 +8569,7 @@ type ClusterConfigInfoEx struct { Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr"` InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty"` } func init() { @@ -7795,6 +8593,7 @@ func init() { type ClusterConfigSpecEx struct { ComputeResourceConfigSpec + SystemVMsConfig *ClusterSystemVMsConfigSpec `xml:"systemVMsConfig,omitempty"` DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` @@ -7809,6 +8608,8 @@ type ClusterConfigSpecEx struct { GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty"` InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` + InHciWorkflow *bool `xml:"inHciWorkflow"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty"` } func init() { @@ -7825,6 +8626,16 @@ func init() { t["ClusterCreatedEvent"] = 
reflect.TypeOf((*ClusterCreatedEvent)(nil)).Elem() } +type ClusterCryptoConfigInfo struct { + DynamicData + + CryptoMode string `xml:"cryptoMode,omitempty"` +} + +func init() { + t["ClusterCryptoConfigInfo"] = reflect.TypeOf((*ClusterCryptoConfigInfo)(nil)).Elem() +} + type ClusterDasAamHostInfo struct { ClusterDasHostInfo @@ -7860,7 +8671,8 @@ func init() { type ClusterDasAdmissionControlPolicy struct { DynamicData - ResourceReductionToToleratePercent int32 `xml:"resourceReductionToToleratePercent,omitempty"` + ResourceReductionToToleratePercent *int32 `xml:"resourceReductionToToleratePercent"` + PMemAdmissionControlEnabled *bool `xml:"pMemAdmissionControlEnabled"` } func init() { @@ -8049,6 +8861,16 @@ func init() { t["ClusterDasVmSettings"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem() } +type ClusterDatastoreUpdateSpec struct { + ArrayUpdateSpec + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["ClusterDatastoreUpdateSpec"] = reflect.TypeOf((*ClusterDatastoreUpdateSpec)(nil)).Elem() +} + type ClusterDependencyRuleInfo struct { ClusterRuleInfo @@ -8110,6 +8932,7 @@ type ClusterDrsConfigInfo struct { EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides"` DefaultVmBehavior DrsBehavior `xml:"defaultVmBehavior,omitempty"` VmotionRate int32 `xml:"vmotionRate,omitempty"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty"` Option []BaseOptionValue `xml:"option,omitempty,typeattr"` } @@ -8329,6 +9152,7 @@ type ClusterFailoverResourcesAdmissionControlInfo struct { CurrentCpuFailoverResourcesPercent int32 `xml:"currentCpuFailoverResourcesPercent"` CurrentMemoryFailoverResourcesPercent int32 `xml:"currentMemoryFailoverResourcesPercent"` + CurrentPMemFailoverResourcesPercent int32 `xml:"currentPMemFailoverResourcesPercent,omitempty"` } func init() { @@ -8338,10 +9162,12 @@ func init() { type ClusterFailoverResourcesAdmissionControlPolicy struct { ClusterDasAdmissionControlPolicy - CpuFailoverResourcesPercent int32 `xml:"cpuFailoverResourcesPercent"` - MemoryFailoverResourcesPercent int32 `xml:"memoryFailoverResourcesPercent"` - FailoverLevel int32 `xml:"failoverLevel,omitempty"` - AutoComputePercentages *bool `xml:"autoComputePercentages"` + CpuFailoverResourcesPercent int32 `xml:"cpuFailoverResourcesPercent"` + MemoryFailoverResourcesPercent int32 `xml:"memoryFailoverResourcesPercent"` + FailoverLevel int32 `xml:"failoverLevel,omitempty"` + AutoComputePercentages *bool `xml:"autoComputePercentages"` + PMemFailoverResourcesPercent int32 `xml:"pMemFailoverResourcesPercent,omitempty"` + AutoComputePMemFailoverResourcesPercent *bool `xml:"autoComputePMemFailoverResourcesPercent"` } func init() { @@ -8523,6 +9349,28 @@ func init() { t["ClusterPowerOnVmResult"] = reflect.TypeOf((*ClusterPowerOnVmResult)(nil)).Elem() } +type ClusterPreemptibleVmPairInfo struct { + DynamicData + + Id int32 `xml:"id,omitempty"` + MonitoredVm ManagedObjectReference `xml:"monitoredVm"` + PreemptibleVm ManagedObjectReference `xml:"preemptibleVm"` +} + +func init() { + t["ClusterPreemptibleVmPairInfo"] = reflect.TypeOf((*ClusterPreemptibleVmPairInfo)(nil)).Elem() +} + +type ClusterPreemptibleVmPairSpec struct { + ArrayUpdateSpec + + Info *ClusterPreemptibleVmPairInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterPreemptibleVmPairSpec"] = reflect.TypeOf((*ClusterPreemptibleVmPairSpec)(nil)).Elem() +} + type ClusterProactiveDrsConfigInfo struct { DynamicData @@ -8672,6 +9520,40 @@ func init() { t["ClusterStatusChangedEvent"] = 
reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() } +type ClusterSystemVMsConfigInfo struct { + DynamicData + + AllowedDatastores []ManagedObjectReference `xml:"allowedDatastores,omitempty"` + NotAllowedDatastores []ManagedObjectReference `xml:"notAllowedDatastores,omitempty"` + DsTagCategoriesToExclude []string `xml:"dsTagCategoriesToExclude,omitempty"` +} + +func init() { + t["ClusterSystemVMsConfigInfo"] = reflect.TypeOf((*ClusterSystemVMsConfigInfo)(nil)).Elem() +} + +type ClusterSystemVMsConfigSpec struct { + DynamicData + + AllowedDatastores []ClusterDatastoreUpdateSpec `xml:"allowedDatastores,omitempty"` + NotAllowedDatastores []ClusterDatastoreUpdateSpec `xml:"notAllowedDatastores,omitempty"` + DsTagCategoriesToExclude []ClusterTagCategoryUpdateSpec `xml:"dsTagCategoriesToExclude,omitempty"` +} + +func init() { + t["ClusterSystemVMsConfigSpec"] = reflect.TypeOf((*ClusterSystemVMsConfigSpec)(nil)).Elem() +} + +type ClusterTagCategoryUpdateSpec struct { + ArrayUpdateSpec + + Category string `xml:"category,omitempty"` +} + +func init() { + t["ClusterTagCategoryUpdateSpec"] = reflect.TypeOf((*ClusterTagCategoryUpdateSpec)(nil)).Elem() +} + type ClusterUsageSummary struct { DynamicData @@ -8936,6 +9818,7 @@ type ComputeResourceConfigInfo struct { VmSwapPlacement string `xml:"vmSwapPlacement"` SpbmEnabled *bool `xml:"spbmEnabled"` DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -8945,9 +9828,12 @@ func init() { type ComputeResourceConfigSpec struct { DynamicData - VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` - SpbmEnabled *bool `xml:"spbmEnabled"` - DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + DesiredSoftwareSpec *DesiredSoftwareSpec `xml:"desiredSoftwareSpec,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` + EnableConfigManager *bool `xml:"enableConfigManager"` } func init() { @@ -9013,6 +9899,7 @@ type ConfigTarget struct { NumCpus int32 `xml:"numCpus"` NumCpuCores int32 `xml:"numCpuCores"` NumNumaNodes int32 `xml:"numNumaNodes"` + MaxCpusPerHost int32 `xml:"maxCpusPerHost,omitempty"` SmcPresent *bool `xml:"smcPresent"` Datastore []VirtualMachineDatastoreInfo `xml:"datastore,omitempty"` Network []VirtualMachineNetworkInfo `xml:"network,omitempty"` @@ -9030,6 +9917,7 @@ type ConfigTarget struct { ScsiDisk []VirtualMachineScsiDiskDeviceInfo `xml:"scsiDisk,omitempty"` IdeDisk []VirtualMachineIdeDiskDeviceInfo `xml:"ideDisk,omitempty"` MaxMemMBOptimalPerf int32 `xml:"maxMemMBOptimalPerf"` + SupportedMaxMemMB int32 `xml:"supportedMaxMemMB,omitempty"` ResourcePool *ResourcePoolRuntimeInfo `xml:"resourcePool,omitempty"` AutoVmotion *bool `xml:"autoVmotion"` PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr"` @@ -9037,6 +9925,15 @@ type ConfigTarget struct { VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty"` SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty"` AvailablePersistentMemoryReservationMB int64 `xml:"availablePersistentMemoryReservationMB,omitempty"` + DynamicPassthrough []VirtualMachineDynamicPassthroughInfo `xml:"dynamicPassthrough,omitempty"` + SgxTargetInfo 
*VirtualMachineSgxTargetInfo `xml:"sgxTargetInfo,omitempty"` + PrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"precisionClockInfo,omitempty"` + SevSupported *bool `xml:"sevSupported"` + VgpuDeviceInfo []VirtualMachineVgpuDeviceInfo `xml:"vgpuDeviceInfo,omitempty"` + VgpuProfileInfo []VirtualMachineVgpuProfileInfo `xml:"vgpuProfileInfo,omitempty"` + VendorDeviceGroupInfo []VirtualMachineVendorDeviceGroupInfo `xml:"vendorDeviceGroupInfo,omitempty"` + MaxSimultaneousThreads int32 `xml:"maxSimultaneousThreads,omitempty"` + DvxClassInfo []VirtualMachineDvxClassInfo `xml:"dvxClassInfo,omitempty"` } func init() { @@ -9101,8 +9998,9 @@ type ConfigureDatastorePrincipalResponse struct { } type ConfigureEvcModeRequestType struct { - This ManagedObjectReference `xml:"_this"` - EvcModeKey string `xml:"evcModeKey"` + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` } func init() { @@ -9119,6 +10017,26 @@ type ConfigureEvcMode_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type ConfigureHCIRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterSpec ClusterComputeResourceHCIConfigSpec `xml:"clusterSpec"` + HostInputs []ClusterComputeResourceHostConfigurationInput `xml:"hostInputs,omitempty"` +} + +func init() { + t["ConfigureHCIRequestType"] = reflect.TypeOf((*ConfigureHCIRequestType)(nil)).Elem() +} + +type ConfigureHCI_Task ConfigureHCIRequestType + +func init() { + t["ConfigureHCI_Task"] = reflect.TypeOf((*ConfigureHCI_Task)(nil)).Elem() +} + +type ConfigureHCI_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type ConfigureHostCacheRequestType struct { This ManagedObjectReference `xml:"_this"` Spec HostCacheConfigurationSpec `xml:"spec"` @@ -9259,6 +10177,43 @@ func init() { t["ConflictingDatastoreFoundFault"] = reflect.TypeOf((*ConflictingDatastoreFoundFault)(nil)).Elem() } +type ConnectNvmeController ConnectNvmeControllerRequestType + +func init() { + t["ConnectNvmeController"] = reflect.TypeOf((*ConnectNvmeController)(nil)).Elem() +} + +type ConnectNvmeControllerExRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConnectSpec []HostNvmeConnectSpec `xml:"connectSpec,omitempty"` +} + +func init() { + t["ConnectNvmeControllerExRequestType"] = reflect.TypeOf((*ConnectNvmeControllerExRequestType)(nil)).Elem() +} + +type ConnectNvmeControllerEx_Task ConnectNvmeControllerExRequestType + +func init() { + t["ConnectNvmeControllerEx_Task"] = reflect.TypeOf((*ConnectNvmeControllerEx_Task)(nil)).Elem() +} + +type ConnectNvmeControllerEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConnectNvmeControllerRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConnectSpec HostNvmeConnectSpec `xml:"connectSpec"` +} + +func init() { + t["ConnectNvmeControllerRequestType"] = reflect.TypeOf((*ConnectNvmeControllerRequestType)(nil)).Elem() +} + +type ConnectNvmeControllerResponse struct { +} + type ConnectedIso struct { OvfExport @@ -9756,6 +10711,7 @@ type CreateDirectoryRequestType struct { Datastore ManagedObjectReference `xml:"datastore"` DisplayName string `xml:"displayName,omitempty"` Policy string `xml:"policy,omitempty"` + Size int64 `xml:"size,omitempty"` } func init() { @@ -10023,6 +10979,43 @@ type CreateNvdimmNamespace_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type CreateNvdimmPMemNamespaceRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + CreateSpec NvdimmPMemNamespaceCreateSpec `xml:"createSpec"` +} + +func init() { + t["CreateNvdimmPMemNamespaceRequestType"] = reflect.TypeOf((*CreateNvdimmPMemNamespaceRequestType)(nil)).Elem() +} + +type CreateNvdimmPMemNamespace_Task CreateNvdimmPMemNamespaceRequestType + +func init() { + t["CreateNvdimmPMemNamespace_Task"] = reflect.TypeOf((*CreateNvdimmPMemNamespace_Task)(nil)).Elem() +} + +type CreateNvdimmPMemNamespace_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateNvmeOverRdmaAdapter CreateNvmeOverRdmaAdapterRequestType + +func init() { + t["CreateNvmeOverRdmaAdapter"] = reflect.TypeOf((*CreateNvmeOverRdmaAdapter)(nil)).Elem() +} + +type CreateNvmeOverRdmaAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + RdmaDeviceName string `xml:"rdmaDeviceName"` +} + +func init() { + t["CreateNvmeOverRdmaAdapterRequestType"] = reflect.TypeOf((*CreateNvmeOverRdmaAdapterRequestType)(nil)).Elem() +} + +type CreateNvmeOverRdmaAdapterResponse struct { +} + type CreateObjectScheduledTask CreateObjectScheduledTaskRequestType func init() { @@ -10261,6 +11254,24 @@ type CreateSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type CreateSoftwareAdapter CreateSoftwareAdapterRequestType + +func init() { + t["CreateSoftwareAdapter"] = reflect.TypeOf((*CreateSoftwareAdapter)(nil)).Elem() +} + +type CreateSoftwareAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseHostHbaCreateSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateSoftwareAdapterRequestType"] = reflect.TypeOf((*CreateSoftwareAdapterRequestType)(nil)).Elem() +} + +type CreateSoftwareAdapterResponse struct { +} + type CreateStoragePod CreateStoragePodRequestType func init() { @@ -10507,15 +11518,33 @@ func init() { type CryptoKeyResult struct { DynamicData - KeyId CryptoKeyId `xml:"keyId"` - Success bool `xml:"success"` - Reason string `xml:"reason,omitempty"` + KeyId CryptoKeyId `xml:"keyId"` + Success bool `xml:"success"` + Reason string `xml:"reason,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` } func init() { t["CryptoKeyResult"] = reflect.TypeOf((*CryptoKeyResult)(nil)).Elem() } +type CryptoManagerHostDisable CryptoManagerHostDisableRequestType + +func init() { + t["CryptoManagerHostDisable"] = reflect.TypeOf((*CryptoManagerHostDisable)(nil)).Elem() +} + +type CryptoManagerHostDisableRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CryptoManagerHostDisableRequestType"] = reflect.TypeOf((*CryptoManagerHostDisableRequestType)(nil)).Elem() +} + +type CryptoManagerHostDisableResponse struct { +} + type CryptoManagerHostEnable CryptoManagerHostEnableRequestType func init() { @@ -10573,6 +11602,8 @@ type CryptoManagerKmipClusterStatus struct { DynamicData ClusterId KeyProviderId `xml:"clusterId"` + OverallStatus ManagedEntityStatus `xml:"overallStatus,omitempty"` + ManagementType string `xml:"managementType,omitempty"` Servers []CryptoManagerKmipServerStatus `xml:"servers"` ClientCertInfo *CryptoManagerKmipCertificateInfo `xml:"clientCertInfo,omitempty"` } @@ -10581,6 +11612,21 @@ func init() { t["CryptoManagerKmipClusterStatus"] = reflect.TypeOf((*CryptoManagerKmipClusterStatus)(nil)).Elem() } +type CryptoManagerKmipCryptoKeyStatus struct { + DynamicData + + KeyId CryptoKeyId `xml:"keyId"` + KeyAvailable *bool `xml:"keyAvailable"` + Reason string `xml:"reason,omitempty"` + EncryptedVMs []ManagedObjectReference 
`xml:"encryptedVMs,omitempty"` + AffectedHosts []ManagedObjectReference `xml:"affectedHosts,omitempty"` + ReferencedByTags []string `xml:"referencedByTags,omitempty"` +} + +func init() { + t["CryptoManagerKmipCryptoKeyStatus"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatus)(nil)).Elem() +} + type CryptoManagerKmipServerCertInfo struct { DynamicData @@ -10821,6 +11867,17 @@ func init() { t["CustomizationAutoIpV6Generator"] = reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem() } +type CustomizationCloudinitPrep struct { + CustomizationIdentitySettings + + Metadata string `xml:"metadata"` + Userdata string `xml:"userdata,omitempty"` +} + +func init() { + t["CustomizationCloudinitPrep"] = reflect.TypeOf((*CustomizationCloudinitPrep)(nil)).Elem() +} + type CustomizationCustomIpGenerator struct { CustomizationIpGenerator @@ -10879,6 +11936,8 @@ func init() { type CustomizationFailed struct { CustomizationEvent + + Reason string `xml:"reason,omitempty"` } func init() { @@ -11064,6 +12123,7 @@ type CustomizationLinuxPrep struct { Domain string `xml:"domain"` TimeZone string `xml:"timeZone,omitempty"` HwClockUTC *bool `xml:"hwClockUTC"` + ScriptText string `xml:"scriptText,omitempty"` } func init() { @@ -11311,6 +12371,28 @@ func init() { t["CustomizationWinOptions"] = reflect.TypeOf((*CustomizationWinOptions)(nil)).Elem() } +type CustomizeGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Spec CustomizationSpec `xml:"spec"` + ConfigParams []BaseOptionValue `xml:"configParams,omitempty,typeattr"` +} + +func init() { + t["CustomizeGuestRequestType"] = reflect.TypeOf((*CustomizeGuestRequestType)(nil)).Elem() +} + +type CustomizeGuest_Task CustomizeGuestRequestType + +func init() { + t["CustomizeGuest_Task"] = reflect.TypeOf((*CustomizeGuest_Task)(nil)).Elem() +} + +type CustomizeGuest_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type CustomizeVMRequestType struct { This ManagedObjectReference `xml:"_this"` Spec CustomizationSpec `xml:"spec"` @@ -11433,6 +12515,7 @@ type DVPortgroupConfigInfo struct { DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` Description string `xml:"description,omitempty"` Type string `xml:"type"` + BackingType string `xml:"backingType,omitempty"` Policy BaseDVPortgroupPolicy `xml:"policy,typeattr"` PortNameFormat string `xml:"portNameFormat,omitempty"` Scope []ManagedObjectReference `xml:"scope,omitempty"` @@ -11441,6 +12524,10 @@ type DVPortgroupConfigInfo struct { AutoExpand *bool `xml:"autoExpand"` VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` Uplink *bool `xml:"uplink"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty"` + TransportZoneName string `xml:"transportZoneName,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { @@ -11457,11 +12544,16 @@ type DVPortgroupConfigSpec struct { DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` Description string `xml:"description,omitempty"` Type string `xml:"type,omitempty"` + BackingType string `xml:"backingType,omitempty"` Scope []ManagedObjectReference `xml:"scope,omitempty"` Policy BaseDVPortgroupPolicy `xml:"policy,omitempty,typeattr"` VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` AutoExpand *bool `xml:"autoExpand"` 
VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty"` + TransportZoneName string `xml:"transportZoneName,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { @@ -11833,6 +12925,17 @@ type DVSManagerLookupDvPortGroupResponse struct { Returnval *ManagedObjectReference `xml:"returnval,omitempty"` } +type DVSManagerPhysicalNicsList struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + PhysicalNics []PhysicalNic `xml:"physicalNics,omitempty"` +} + +func init() { + t["DVSManagerPhysicalNicsList"] = reflect.TypeOf((*DVSManagerPhysicalNicsList)(nil)).Elem() +} + type DVSNameArrayUplinkPortPolicy struct { DVSUplinkPortPolicy @@ -12187,10 +13290,28 @@ func init() { t["DatabaseSizeParam"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem() } +type DatacenterBasicConnectInfo struct { + DynamicData + + Hostname string `xml:"hostname,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + ServerIp string `xml:"serverIp,omitempty"` + NumVm int32 `xml:"numVm,omitempty"` + NumPoweredOnVm int32 `xml:"numPoweredOnVm,omitempty"` + HostProductInfo *AboutInfo `xml:"hostProductInfo,omitempty"` + HardwareVendor string `xml:"hardwareVendor,omitempty"` + HardwareModel string `xml:"hardwareModel,omitempty"` +} + +func init() { + t["DatacenterBasicConnectInfo"] = reflect.TypeOf((*DatacenterBasicConnectInfo)(nil)).Elem() +} + type DatacenterConfigInfo struct { DynamicData DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -12201,6 +13322,7 @@ type DatacenterConfigSpec struct { DynamicData DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -12288,6 +13410,7 @@ type DatastoreCapability struct { VsanSparseSupported *bool `xml:"vsanSparseSupported"` UpitSupported *bool `xml:"upitSupported"` VmdkExpandSupported *bool `xml:"vmdkExpandSupported"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported"` } func init() { @@ -12459,6 +13582,7 @@ type DatastoreInfo struct { MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty"` Timestamp *time.Time `xml:"timestamp"` ContainerId string `xml:"containerId,omitempty"` + AliasOf string `xml:"aliasOf,omitempty"` } func init() { @@ -12909,6 +14033,26 @@ type DeleteSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type DeleteVStorageObjectExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DeleteVStorageObjectExRequestType"] = reflect.TypeOf((*DeleteVStorageObjectExRequestType)(nil)).Elem() +} + +type DeleteVStorageObjectEx_Task DeleteVStorageObjectExRequestType + +func init() { + t["DeleteVStorageObjectEx_Task"] = reflect.TypeOf((*DeleteVStorageObjectEx_Task)(nil)).Elem() +} + +type DeleteVStorageObjectEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type DeleteVStorageObjectRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -13069,6 +14213,50 @@ func init() { type DeselectVnicResponse struct { } +type DesiredSoftwareSpec struct { + DynamicData + + BaseImageSpec DesiredSoftwareSpecBaseImageSpec `xml:"baseImageSpec"` + 
VendorAddOnSpec *DesiredSoftwareSpecVendorAddOnSpec `xml:"vendorAddOnSpec,omitempty"` + Components []DesiredSoftwareSpecComponentSpec `xml:"components,omitempty"` +} + +func init() { + t["DesiredSoftwareSpec"] = reflect.TypeOf((*DesiredSoftwareSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecBaseImageSpec struct { + DynamicData + + Version string `xml:"version"` +} + +func init() { + t["DesiredSoftwareSpecBaseImageSpec"] = reflect.TypeOf((*DesiredSoftwareSpecBaseImageSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecComponentSpec struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["DesiredSoftwareSpecComponentSpec"] = reflect.TypeOf((*DesiredSoftwareSpecComponentSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecVendorAddOnSpec struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` +} + +func init() { + t["DesiredSoftwareSpecVendorAddOnSpec"] = reflect.TypeOf((*DesiredSoftwareSpecVendorAddOnSpec)(nil)).Elem() +} + type DestinationSwitchFull struct { CannotAccessNetwork } @@ -13495,6 +14683,17 @@ func init() { t["DeviceUnsupportedForVmVersionFault"] = reflect.TypeOf((*DeviceUnsupportedForVmVersionFault)(nil)).Elem() } +type DiagnosticManagerAuditRecordResult struct { + DynamicData + + Records []string `xml:"records,omitempty"` + NextToken string `xml:"nextToken"` +} + +func init() { + t["DiagnosticManagerAuditRecordResult"] = reflect.TypeOf((*DiagnosticManagerAuditRecordResult)(nil)).Elem() +} + type DiagnosticManagerBundleInfo struct { DynamicData @@ -13575,6 +14774,43 @@ func init() { t["DisableAdminNotSupportedFault"] = reflect.TypeOf((*DisableAdminNotSupportedFault)(nil)).Elem() } +type DisableAlarm DisableAlarmRequestType + +func init() { + t["DisableAlarm"] = reflect.TypeOf((*DisableAlarm)(nil)).Elem() +} + +type DisableAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["DisableAlarmRequestType"] = reflect.TypeOf((*DisableAlarmRequestType)(nil)).Elem() +} + +type DisableAlarmResponse struct { +} + +type DisableClusteredVmdkSupport DisableClusteredVmdkSupportRequestType + +func init() { + t["DisableClusteredVmdkSupport"] = reflect.TypeOf((*DisableClusteredVmdkSupport)(nil)).Elem() +} + +type DisableClusteredVmdkSupportRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DisableClusteredVmdkSupportRequestType"] = reflect.TypeOf((*DisableClusteredVmdkSupportRequestType)(nil)).Elem() +} + +type DisableClusteredVmdkSupportResponse struct { +} + type DisableEvcModeRequestType struct { This ManagedObjectReference `xml:"_this"` } @@ -13784,6 +15020,43 @@ type DisconnectHost_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type DisconnectNvmeController DisconnectNvmeControllerRequestType + +func init() { + t["DisconnectNvmeController"] = reflect.TypeOf((*DisconnectNvmeController)(nil)).Elem() +} + +type DisconnectNvmeControllerExRequestType struct { + This ManagedObjectReference `xml:"_this"` + DisconnectSpec []HostNvmeDisconnectSpec `xml:"disconnectSpec,omitempty"` +} + +func init() { + t["DisconnectNvmeControllerExRequestType"] = reflect.TypeOf((*DisconnectNvmeControllerExRequestType)(nil)).Elem() +} + +type DisconnectNvmeControllerEx_Task DisconnectNvmeControllerExRequestType + +func init() { + t["DisconnectNvmeControllerEx_Task"] = 
reflect.TypeOf((*DisconnectNvmeControllerEx_Task)(nil)).Elem() +} + +type DisconnectNvmeControllerEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DisconnectNvmeControllerRequestType struct { + This ManagedObjectReference `xml:"_this"` + DisconnectSpec HostNvmeDisconnectSpec `xml:"disconnectSpec"` +} + +func init() { + t["DisconnectNvmeControllerRequestType"] = reflect.TypeOf((*DisconnectNvmeControllerRequestType)(nil)).Elem() +} + +type DisconnectNvmeControllerResponse struct { +} + type DisconnectedHostsBlockingEVC struct { EVCConfigFault } @@ -13816,6 +15089,25 @@ func init() { type DiscoverFcoeHbasResponse struct { } +type DiscoverNvmeControllers DiscoverNvmeControllersRequestType + +func init() { + t["DiscoverNvmeControllers"] = reflect.TypeOf((*DiscoverNvmeControllers)(nil)).Elem() +} + +type DiscoverNvmeControllersRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiscoverSpec HostNvmeDiscoverSpec `xml:"discoverSpec"` +} + +func init() { + t["DiscoverNvmeControllersRequestType"] = reflect.TypeOf((*DiscoverNvmeControllersRequestType)(nil)).Elem() +} + +type DiscoverNvmeControllersResponse struct { + Returnval HostNvmeDiscoveryLog `xml:"returnval"` +} + type DiskChangeExtent struct { DynamicData @@ -13839,6 +15131,17 @@ func init() { t["DiskChangeInfo"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem() } +type DiskCryptoSpec struct { + DynamicData + + Parent *DiskCryptoSpec `xml:"parent,omitempty"` + Crypto BaseCryptoSpec `xml:"crypto,typeattr"` +} + +func init() { + t["DiskCryptoSpec"] = reflect.TypeOf((*DiskCryptoSpec)(nil)).Elem() +} + type DiskHasPartitions struct { VsanDiskFault } @@ -13972,6 +15275,8 @@ type DistributedVirtualPort struct { ConnectionCookie int32 `xml:"connectionCookie,omitempty"` LastStatusChange time.Time `xml:"lastStatusChange"` HostLocalPort *bool `xml:"hostLocalPort"` + ExternalId string `xml:"externalId,omitempty"` + SegmentPortId string `xml:"segmentPortId,omitempty"` } func init() { @@ -13989,12 +15294,37 @@ type DistributedVirtualPortgroupInfo struct { UplinkPortgroup bool `xml:"uplinkPortgroup"` Portgroup ManagedObjectReference `xml:"portgroup"` NetworkReservationSupported *bool `xml:"networkReservationSupported"` + BackingType string `xml:"backingType,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { t["DistributedVirtualPortgroupInfo"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem() } +type DistributedVirtualPortgroupNsxPortgroupOperationResult struct { + DynamicData + + Portgroups []ManagedObjectReference `xml:"portgroups,omitempty"` + Problems []DistributedVirtualPortgroupProblem `xml:"problems,omitempty"` +} + +func init() { + t["DistributedVirtualPortgroupNsxPortgroupOperationResult"] = reflect.TypeOf((*DistributedVirtualPortgroupNsxPortgroupOperationResult)(nil)).Elem() +} + +type DistributedVirtualPortgroupProblem struct { + DynamicData + + LogicalSwitchUuid string `xml:"logicalSwitchUuid"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["DistributedVirtualPortgroupProblem"] = reflect.TypeOf((*DistributedVirtualPortgroupProblem)(nil)).Elem() +} + type DistributedVirtualSwitchHostMember struct { DynamicData @@ -14021,10 +15351,16 @@ func init() { type DistributedVirtualSwitchHostMemberConfigInfo struct { DynamicData - Host *ManagedObjectReference `xml:"host,omitempty"` - MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts"` - VendorSpecificConfig 
[]DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` - Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"` + Host *ManagedObjectReference `xml:"host,omitempty"` + MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"` + NsxSwitch *bool `xml:"nsxSwitch"` + EnsEnabled *bool `xml:"ensEnabled"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty"` + NsxtUsedUplinkNames []string `xml:"nsxtUsedUplinkNames,omitempty"` + NetworkOffloadingEnabled *bool `xml:"networkOffloadingEnabled"` } func init() { @@ -14078,6 +15414,17 @@ func init() { t["DistributedVirtualSwitchHostMemberRuntimeState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem() } +type DistributedVirtualSwitchHostMemberTransportZoneInfo struct { + DynamicData + + Uuid string `xml:"uuid"` + Type string `xml:"type"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberTransportZoneInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneInfo)(nil)).Elem() +} + type DistributedVirtualSwitchHostProductSpec struct { DynamicData @@ -14198,6 +15545,18 @@ func init() { t["DistributedVirtualSwitchManagerImportResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerImportResult)(nil)).Elem() } +type DistributedVirtualSwitchNetworkOffloadSpec struct { + DynamicData + + Id string `xml:"id"` + Name string `xml:"name,omitempty"` + Types []string `xml:"types,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchNetworkOffloadSpec"] = reflect.TypeOf((*DistributedVirtualSwitchNetworkOffloadSpec)(nil)).Elem() +} + type DistributedVirtualSwitchPortConnectee struct { DynamicData @@ -14230,6 +15589,7 @@ type DistributedVirtualSwitchPortCriteria struct { Connected *bool `xml:"connected"` Active *bool `xml:"active"` UplinkPort *bool `xml:"uplinkPort"` + NsxPort *bool `xml:"nsxPort"` Scope *ManagedObjectReference `xml:"scope,omitempty"` PortgroupKey []string `xml:"portgroupKey,omitempty"` Inside *bool `xml:"inside"` @@ -14319,6 +15679,69 @@ func init() { t["DomainNotFoundFault"] = reflect.TypeOf((*DomainNotFoundFault)(nil)).Elem() } +type DownloadDescriptionTree DownloadDescriptionTreeRequestType + +func init() { + t["DownloadDescriptionTree"] = reflect.TypeOf((*DownloadDescriptionTree)(nil)).Elem() +} + +type DownloadDescriptionTreeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DownloadDescriptionTreeRequestType"] = reflect.TypeOf((*DownloadDescriptionTreeRequestType)(nil)).Elem() +} + +type DownloadDescriptionTreeResponse struct { + Returnval []byte `xml:"returnval"` +} + +type DpuStatusInfo struct { + HostHardwareElementInfo + + DpuId string `xml:"dpuId"` + Fru *HostFru `xml:"fru,omitempty"` + Sensors []DpuStatusInfoOperationalInfo `xml:"sensors,omitempty"` +} + +func init() { + t["DpuStatusInfo"] = reflect.TypeOf((*DpuStatusInfo)(nil)).Elem() +} + +type DpuStatusInfoOperationalInfo struct { + DynamicData + + SensorId string `xml:"sensorId"` + HealthState BaseElementDescription `xml:"healthState,omitempty,typeattr"` + Reading string `xml:"reading"` + Units string `xml:"units,omitempty"` + TimeStamp *time.Time `xml:"timeStamp"` +} + +func init() { + t["DpuStatusInfoOperationalInfo"] = 
reflect.TypeOf((*DpuStatusInfoOperationalInfo)(nil)).Elem() +} + +type DropConnections DropConnectionsRequestType + +func init() { + t["DropConnections"] = reflect.TypeOf((*DropConnections)(nil)).Elem() +} + +type DropConnectionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + ListOfConnections []BaseVirtualMachineConnection `xml:"listOfConnections,omitempty,typeattr"` +} + +func init() { + t["DropConnectionsRequestType"] = reflect.TypeOf((*DropConnectionsRequestType)(nil)).Elem() +} + +type DropConnectionsResponse struct { + Returnval bool `xml:"returnval"` +} + type DrsDisabledEvent struct { ClusterEvent } @@ -15774,6 +17197,12 @@ func init() { t["ElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem() } +type EnableAlarm EnableAlarmRequestType + +func init() { + t["EnableAlarm"] = reflect.TypeOf((*EnableAlarm)(nil)).Elem() +} + type EnableAlarmActions EnableAlarmActionsRequestType func init() { @@ -15793,6 +17222,37 @@ func init() { type EnableAlarmActionsResponse struct { } +type EnableAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["EnableAlarmRequestType"] = reflect.TypeOf((*EnableAlarmRequestType)(nil)).Elem() +} + +type EnableAlarmResponse struct { +} + +type EnableClusteredVmdkSupport EnableClusteredVmdkSupportRequestType + +func init() { + t["EnableClusteredVmdkSupport"] = reflect.TypeOf((*EnableClusteredVmdkSupport)(nil)).Elem() +} + +type EnableClusteredVmdkSupportRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["EnableClusteredVmdkSupportRequestType"] = reflect.TypeOf((*EnableClusteredVmdkSupportRequestType)(nil)).Elem() +} + +type EnableClusteredVmdkSupportResponse struct { +} + type EnableCrypto EnableCryptoRequestType func init() { @@ -16724,6 +18184,26 @@ type ExtendDisk_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type ExtendHCIRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostInputs []ClusterComputeResourceHostConfigurationInput `xml:"hostInputs,omitempty"` + VSanConfigSpec *SDDCBase `xml:"vSanConfigSpec,omitempty"` +} + +func init() { + t["ExtendHCIRequestType"] = reflect.TypeOf((*ExtendHCIRequestType)(nil)).Elem() +} + +type ExtendHCI_Task ExtendHCIRequestType + +func init() { + t["ExtendHCI_Task"] = reflect.TypeOf((*ExtendHCI_Task)(nil)).Elem() +} + +type ExtendHCI_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type ExtendVffs ExtendVffsRequestType func init() { @@ -17400,6 +18880,18 @@ func init() { t["FcoeFaultPnicHasNoPortSetFault"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSetFault)(nil)).Elem() } +type FeatureEVCMode struct { + ElementDescription + + Mask []HostFeatureMask `xml:"mask,omitempty"` + Capability []HostFeatureCapability `xml:"capability,omitempty"` + Requirement []VirtualMachineFeatureRequirement `xml:"requirement,omitempty"` +} + +func init() { + t["FeatureEVCMode"] = reflect.TypeOf((*FeatureEVCMode)(nil)).Elem() +} + type FeatureRequirementsNotMet struct { VirtualHardwareCompatibilityIssue @@ -17418,6 +18910,25 @@ func init() { t["FeatureRequirementsNotMetFault"] = reflect.TypeOf((*FeatureRequirementsNotMetFault)(nil)).Elem() } +type FetchAuditRecords FetchAuditRecordsRequestType + +func init() { + t["FetchAuditRecords"] = reflect.TypeOf((*FetchAuditRecords)(nil)).Elem() +} + +type 
FetchAuditRecordsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token,omitempty"` +} + +func init() { + t["FetchAuditRecordsRequestType"] = reflect.TypeOf((*FetchAuditRecordsRequestType)(nil)).Elem() +} + +type FetchAuditRecordsResponse struct { + Returnval DiagnosticManagerAuditRecordResult `xml:"returnval"` +} + type FetchDVPortKeys FetchDVPortKeysRequestType func init() { @@ -17993,6 +19504,29 @@ func init() { t["FloppyImageFileQuery"] = reflect.TypeOf((*FloppyImageFileQuery)(nil)).Elem() } +type FolderBatchAddHostsToClusterResult struct { + DynamicData + + HostsAddedToCluster []ManagedObjectReference `xml:"hostsAddedToCluster,omitempty"` + HostsFailedInventoryAdd []FolderFailedHostResult `xml:"hostsFailedInventoryAdd,omitempty"` + HostsFailedMoveToCluster []FolderFailedHostResult `xml:"hostsFailedMoveToCluster,omitempty"` +} + +func init() { + t["FolderBatchAddHostsToClusterResult"] = reflect.TypeOf((*FolderBatchAddHostsToClusterResult)(nil)).Elem() +} + +type FolderBatchAddStandaloneHostsResult struct { + DynamicData + + AddedHosts []ManagedObjectReference `xml:"addedHosts,omitempty"` + HostsFailedInventoryAdd []FolderFailedHostResult `xml:"hostsFailedInventoryAdd,omitempty"` +} + +func init() { + t["FolderBatchAddStandaloneHostsResult"] = reflect.TypeOf((*FolderBatchAddStandaloneHostsResult)(nil)).Elem() +} + type FolderEventArgument struct { EntityEventArgument @@ -18003,6 +19537,19 @@ func init() { t["FolderEventArgument"] = reflect.TypeOf((*FolderEventArgument)(nil)).Elem() } +type FolderFailedHostResult struct { + DynamicData + + HostName string `xml:"hostName,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Context LocalizableMessage `xml:"context"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["FolderFailedHostResult"] = reflect.TypeOf((*FolderFailedHostResult)(nil)).Elem() +} + type FolderFileInfo struct { FileInfo } @@ -18019,6 +19566,17 @@ func init() { t["FolderFileQuery"] = reflect.TypeOf((*FolderFileQuery)(nil)).Elem() } +type FolderNewHostSpec struct { + DynamicData + + HostCnxSpec HostConnectSpec `xml:"hostCnxSpec"` + EsxLicense string `xml:"esxLicense,omitempty"` +} + +func init() { + t["FolderNewHostSpec"] = reflect.TypeOf((*FolderNewHostSpec)(nil)).Elem() +} + type FormatVffs FormatVffsRequestType func init() { @@ -18546,6 +20104,26 @@ type GetCustomizationSpecResponse struct { Returnval CustomizationSpecItem `xml:"returnval"` } +type GetDefaultKmsCluster GetDefaultKmsClusterRequestType + +func init() { + t["GetDefaultKmsCluster"] = reflect.TypeOf((*GetDefaultKmsCluster)(nil)).Elem() +} + +type GetDefaultKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + DefaultsToParent *bool `xml:"defaultsToParent"` +} + +func init() { + t["GetDefaultKmsClusterRequestType"] = reflect.TypeOf((*GetDefaultKmsClusterRequestType)(nil)).Elem() +} + +type GetDefaultKmsClusterResponse struct { + Returnval *KeyProviderId `xml:"returnval,omitempty"` +} + type GetPublicKey GetPublicKeyRequestType func init() { @@ -18582,6 +20160,42 @@ type GetResourceUsageResponse struct { Returnval ClusterResourceUsageSummary `xml:"returnval"` } +type GetSiteInfo GetSiteInfoRequestType + +func init() { + t["GetSiteInfo"] = reflect.TypeOf((*GetSiteInfo)(nil)).Elem() +} + +type GetSiteInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetSiteInfoRequestType"] = 
reflect.TypeOf((*GetSiteInfoRequestType)(nil)).Elem() +} + +type GetSiteInfoResponse struct { + Returnval SiteInfo `xml:"returnval"` +} + +type GetSystemVMsRestrictedDatastores GetSystemVMsRestrictedDatastoresRequestType + +func init() { + t["GetSystemVMsRestrictedDatastores"] = reflect.TypeOf((*GetSystemVMsRestrictedDatastores)(nil)).Elem() +} + +type GetSystemVMsRestrictedDatastoresRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetSystemVMsRestrictedDatastoresRequestType"] = reflect.TypeOf((*GetSystemVMsRestrictedDatastoresRequestType)(nil)).Elem() +} + +type GetSystemVMsRestrictedDatastoresResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + type GetVchaClusterHealth GetVchaClusterHealthRequestType func init() { @@ -18752,9 +20366,11 @@ func init() { type GuestDiskInfo struct { DynamicData - DiskPath string `xml:"diskPath,omitempty"` - Capacity int64 `xml:"capacity,omitempty"` - FreeSpace int64 `xml:"freeSpace,omitempty"` + DiskPath string `xml:"diskPath,omitempty"` + Capacity int64 `xml:"capacity,omitempty"` + FreeSpace int64 `xml:"freeSpace,omitempty"` + FilesystemType string `xml:"filesystemType,omitempty"` + Mappings []GuestInfoVirtualDiskMapping `xml:"mappings,omitempty"` } func init() { @@ -18812,12 +20428,27 @@ type GuestInfo struct { InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady"` GuestStateChangeSupported *bool `xml:"guestStateChangeSupported"` GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` + CustomizationInfo *GuestInfoCustomizationInfo `xml:"customizationInfo,omitempty"` } func init() { t["GuestInfo"] = reflect.TypeOf((*GuestInfo)(nil)).Elem() } +type GuestInfoCustomizationInfo struct { + DynamicData + + CustomizationStatus string `xml:"customizationStatus"` + StartTime *time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + ErrorMsg string `xml:"errorMsg,omitempty"` +} + +func init() { + t["GuestInfoCustomizationInfo"] = reflect.TypeOf((*GuestInfoCustomizationInfo)(nil)).Elem() +} + type GuestInfoNamespaceGenerationInfo struct { DynamicData @@ -18829,6 +20460,16 @@ func init() { t["GuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem() } +type GuestInfoVirtualDiskMapping struct { + DynamicData + + Key int32 `xml:"key"` +} + +func init() { + t["GuestInfoVirtualDiskMapping"] = reflect.TypeOf((*GuestInfoVirtualDiskMapping)(nil)).Elem() +} + type GuestListFileInfo struct { DynamicData @@ -18973,7 +20614,11 @@ type GuestOsDescriptor struct { NumRecommendedCoresPerSocket int32 `xml:"numRecommendedCoresPerSocket,omitempty"` VvtdSupported *BoolOption `xml:"vvtdSupported,omitempty"` VbsSupported *BoolOption `xml:"vbsSupported,omitempty"` + VsgxSupported *BoolOption `xml:"vsgxSupported,omitempty"` + VsgxRemoteAttestationSupported *bool `xml:"vsgxRemoteAttestationSupported"` SupportsTPM20 *bool `xml:"supportsTPM20"` + RecommendedTPM20 *bool `xml:"recommendedTPM20"` + VwdtSupported *bool `xml:"vwdtSupported"` } func init() { @@ -19704,6 +21349,39 @@ func init() { t["HostApplyProfile"] = reflect.TypeOf((*HostApplyProfile)(nil)).Elem() } +type HostAssignableHardwareBinding struct { + DynamicData + + InstanceId string `xml:"instanceId"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["HostAssignableHardwareBinding"] = reflect.TypeOf((*HostAssignableHardwareBinding)(nil)).Elem() +} + +type HostAssignableHardwareConfig struct { + 
DynamicData + + AttributeOverride []HostAssignableHardwareConfigAttributeOverride `xml:"attributeOverride,omitempty"` +} + +func init() { + t["HostAssignableHardwareConfig"] = reflect.TypeOf((*HostAssignableHardwareConfig)(nil)).Elem() +} + +type HostAssignableHardwareConfigAttributeOverride struct { + DynamicData + + InstanceId string `xml:"instanceId"` + Name string `xml:"name"` + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["HostAssignableHardwareConfigAttributeOverride"] = reflect.TypeOf((*HostAssignableHardwareConfigAttributeOverride)(nil)).Elem() +} + type HostAuthenticationManagerInfo struct { DynamicData @@ -19925,6 +21603,34 @@ type HostCapability struct { VmCreateDateSupported *bool `xml:"vmCreateDateSupported"` Vmfs3EOLSupported *bool `xml:"vmfs3EOLSupported"` FtVmcpSupported *bool `xml:"ftVmcpSupported"` + QuickBootSupported *bool `xml:"quickBootSupported"` + EncryptedFtSupported *bool `xml:"encryptedFtSupported"` + AssignableHardwareSupported *bool `xml:"assignableHardwareSupported"` + SuspendToMemorySupported *bool `xml:"suspendToMemorySupported"` + UseFeatureReqsForOldHWv *bool `xml:"useFeatureReqsForOldHWv"` + MarkPerenniallyReservedSupported *bool `xml:"markPerenniallyReservedSupported"` + HppPspSupported *bool `xml:"hppPspSupported"` + DeviceRebindWithoutRebootSupported *bool `xml:"deviceRebindWithoutRebootSupported"` + StoragePolicyChangeSupported *bool `xml:"storagePolicyChangeSupported"` + PrecisionTimeProtocolSupported *bool `xml:"precisionTimeProtocolSupported"` + RemoteDeviceVMotionSupported *bool `xml:"remoteDeviceVMotionSupported"` + MaxSupportedVmMemory int32 `xml:"maxSupportedVmMemory,omitempty"` + AhDeviceHintsSupported *bool `xml:"ahDeviceHintsSupported"` + NvmeOverTcpSupported *bool `xml:"nvmeOverTcpSupported"` + NvmeStorageFabricServicesSupported *bool `xml:"nvmeStorageFabricServicesSupported"` + AssignHwPciConfigSupported *bool `xml:"assignHwPciConfigSupported"` + TimeConfigSupported *bool `xml:"timeConfigSupported"` + NvmeBatchOperationsSupported *bool `xml:"nvmeBatchOperationsSupported"` + PMemFailoverSupported *bool `xml:"pMemFailoverSupported"` + HostConfigEncryptionSupported *bool `xml:"hostConfigEncryptionSupported"` + MaxSupportedSimultaneousThreads int32 `xml:"maxSupportedSimultaneousThreads,omitempty"` + PtpConfigSupported *bool `xml:"ptpConfigSupported"` + MaxSupportedPtpPorts int32 `xml:"maxSupportedPtpPorts,omitempty"` + SgxRegistrationSupported *bool `xml:"sgxRegistrationSupported"` + PMemIndependentSnapshotSupported *bool `xml:"pMemIndependentSnapshotSupported"` + IommuSLDirtyCapable *bool `xml:"iommuSLDirtyCapable"` + UltralowFixedUnmapSupported *bool `xml:"ultralowFixedUnmapSupported"` + NvmeVvolSupported *bool `xml:"nvmeVvolSupported"` } func init() { @@ -20221,12 +21927,15 @@ type HostConfigInfo struct { DomainList []string `xml:"domainList,omitempty"` ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty"` HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty"` + DescriptionTreeCheckSum []byte `xml:"descriptionTreeCheckSum,omitempty"` GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty"` SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty"` GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` SharedGpuCapabilities []HostSharedGpuCapabilities `xml:"sharedGpuCapabilities,omitempty"` IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty"` SriovDevicePool []BaseHostSriovDevicePoolInfo `xml:"sriovDevicePool,omitempty,typeattr"` + AssignableHardwareBinding 
[]HostAssignableHardwareBinding `xml:"assignableHardwareBinding,omitempty"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty"` } func init() { @@ -20274,6 +21983,7 @@ type HostConfigManager struct { CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"` CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty"` NvdimmSystem *ManagedObjectReference `xml:"nvdimmSystem,omitempty"` + AssignableHardwareManager *ManagedObjectReference `xml:"assignableHardwareManager,omitempty"` } func init() { @@ -20301,6 +22011,7 @@ type HostConfigSpec struct { ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty"` GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty"` GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty"` } func init() { @@ -20574,6 +22285,16 @@ func init() { t["HostDasOkEvent"] = reflect.TypeOf((*HostDasOkEvent)(nil)).Elem() } +type HostDataTransportConnectionInfo struct { + DynamicData + + StaticMemoryConsumed int64 `xml:"staticMemoryConsumed"` +} + +func init() { + t["HostDataTransportConnectionInfo"] = reflect.TypeOf((*HostDataTransportConnectionInfo)(nil)).Elem() +} + type HostDatastoreBrowserSearchResults struct { DynamicData @@ -20668,8 +22389,14 @@ func init() { type HostDateTimeConfig struct { DynamicData - TimeZone string `xml:"timeZone,omitempty"` - NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + TimeZone string `xml:"timeZone,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + PtpConfig *HostPtpConfig `xml:"ptpConfig,omitempty"` + Protocol string `xml:"protocol,omitempty"` + Enabled *bool `xml:"enabled"` + DisableEvents *bool `xml:"disableEvents"` + DisableFallback *bool `xml:"disableFallback"` + ResetToFactoryDefaults *bool `xml:"resetToFactoryDefaults"` } func init() { @@ -20679,14 +22406,38 @@ func init() { type HostDateTimeInfo struct { DynamicData - TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` - NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` + SystemClockProtocol string `xml:"systemClockProtocol,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + PtpConfig *HostPtpConfig `xml:"ptpConfig,omitempty"` + Enabled *bool `xml:"enabled"` + DisableEvents *bool `xml:"disableEvents"` + DisableFallback *bool `xml:"disableFallback"` + InFallbackState *bool `xml:"inFallbackState"` + ServiceSync *bool `xml:"serviceSync"` + LastSyncTime *time.Time `xml:"lastSyncTime"` + RemoteNtpServer string `xml:"remoteNtpServer,omitempty"` + NtpRunTime int64 `xml:"ntpRunTime,omitempty"` + PtpRunTime int64 `xml:"ptpRunTime,omitempty"` + NtpDuration string `xml:"ntpDuration,omitempty"` + PtpDuration string `xml:"ptpDuration,omitempty"` } func init() { t["HostDateTimeInfo"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem() } +type HostDateTimeSystemServiceTestResult struct { + DynamicData + + WorkingNormally bool `xml:"workingNormally"` + Report []string `xml:"report,omitempty"` +} + +func init() { + t["HostDateTimeSystemServiceTestResult"] = reflect.TypeOf((*HostDateTimeSystemServiceTestResult)(nil)).Elem() +} + type HostDateTimeSystemTimeZone struct { DynamicData @@ -20700,6 +22451,26 @@ func init() { t["HostDateTimeSystemTimeZone"] = reflect.TypeOf((*HostDateTimeSystemTimeZone)(nil)).Elem() } +type HostDeleteVStorageObjectExRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostDeleteVStorageObjectExRequestType"] = reflect.TypeOf((*HostDeleteVStorageObjectExRequestType)(nil)).Elem() +} + +type HostDeleteVStorageObjectEx_Task HostDeleteVStorageObjectExRequestType + +func init() { + t["HostDeleteVStorageObjectEx_Task"] = reflect.TypeOf((*HostDeleteVStorageObjectEx_Task)(nil)).Elem() +} + +type HostDeleteVStorageObjectEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type HostDeleteVStorageObjectRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -21045,6 +22816,19 @@ func init() { t["HostDnsConfigSpec"] = reflect.TypeOf((*HostDnsConfigSpec)(nil)).Elem() } +type HostDvxClass struct { + DynamicData + + DeviceClass string `xml:"deviceClass"` + CheckpointSupported bool `xml:"checkpointSupported"` + SwDMATracingSupported bool `xml:"swDMATracingSupported"` + SriovNic bool `xml:"sriovNic"` +} + +func init() { + t["HostDvxClass"] = reflect.TypeOf((*HostDvxClass)(nil)).Elem() +} + type HostEnableAdminFailedEvent struct { HostEvent @@ -21191,7 +22975,7 @@ type HostFibreChannelOverEthernetHba struct { UnderlyingNic string `xml:"underlyingNic"` LinkInfo HostFibreChannelOverEthernetHbaLinkInfo `xml:"linkInfo"` IsSoftwareFcoe bool `xml:"isSoftwareFcoe"` - MarkedForRemoval bool `xml:"markedForRemoval"` + MarkedForRemoval *bool `xml:"markedForRemoval"` } func init() { @@ -21408,6 +23192,21 @@ func init() { t["HostForceMountedInfo"] = reflect.TypeOf((*HostForceMountedInfo)(nil)).Elem() } +type HostFru struct { + DynamicData + + Type string `xml:"type"` + PartName string `xml:"partName"` + PartNumber string `xml:"partNumber"` + Manufacturer string `xml:"manufacturer"` + SerialNumber string `xml:"serialNumber,omitempty"` + MfgTimeStamp *time.Time `xml:"mfgTimeStamp"` +} + +func init() { + t["HostFru"] = reflect.TypeOf((*HostFru)(nil)).Elem() +} + type HostGatewaySpec struct { DynamicData @@ -21508,10 +23307,15 @@ type HostHardwareInfo struct { NumaInfo *HostNumaInfo `xml:"numaInfo,omitempty"` SmcPresent *bool `xml:"smcPresent"` PciDevice []HostPciDevice `xml:"pciDevice,omitempty"` + DvxClasses []HostDvxClass `xml:"dvxClasses,omitempty"` CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty"` BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty"` ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty"` PersistentMemoryInfo *HostPersistentMemoryInfo `xml:"persistentMemoryInfo,omitempty"` + SgxInfo *HostSgxInfo `xml:"sgxInfo,omitempty"` + SevInfo *HostSevInfo `xml:"sevInfo,omitempty"` + MemoryTieringType string `xml:"memoryTieringType,omitempty"` + MemoryTierInfo []HostMemoryTierInfo `xml:"memoryTierInfo,omitempty"` } func init() { @@ -21524,6 +23328,7 @@ type HostHardwareStatusInfo struct { MemoryStatusInfo []BaseHostHardwareElementInfo `xml:"memoryStatusInfo,omitempty,typeattr"` CpuStatusInfo []BaseHostHardwareElementInfo `xml:"cpuStatusInfo,omitempty,typeattr"` StorageStatusInfo []HostStorageElementInfo `xml:"storageStatusInfo,omitempty"` + DpuStatusInfo []DpuStatusInfo `xml:"dpuStatusInfo,omitempty"` } func init() { @@ -21569,16 +23374,25 @@ func init() { t["HostHasComponentFailureFault"] = reflect.TypeOf((*HostHasComponentFailureFault)(nil)).Elem() } +type HostHbaCreateSpec struct { + DynamicData +} + +func init() { + t["HostHbaCreateSpec"] = reflect.TypeOf((*HostHbaCreateSpec)(nil)).Elem() +} + type HostHostBusAdapter struct { DynamicData - Key string `xml:"key,omitempty"` - Device 
string `xml:"device"` - Bus int32 `xml:"bus"` - Status string `xml:"status"` - Model string `xml:"model"` - Driver string `xml:"driver,omitempty"` - Pci string `xml:"pci,omitempty"` + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Bus int32 `xml:"bus"` + Status string `xml:"status"` + Model string `xml:"model"` + Driver string `xml:"driver,omitempty"` + Pci string `xml:"pci,omitempty"` + StorageProtocol string `xml:"storageProtocol,omitempty"` } func init() { @@ -22239,19 +24053,21 @@ func init() { type HostListSummary struct { DynamicData - Host *ManagedObjectReference `xml:"host,omitempty"` - Hardware *HostHardwareSummary `xml:"hardware,omitempty"` - Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` - Config HostConfigSummary `xml:"config"` - QuickStats HostListSummaryQuickStats `xml:"quickStats"` - OverallStatus ManagedEntityStatus `xml:"overallStatus"` - RebootRequired bool `xml:"rebootRequired"` - CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` - ManagementServerIp string `xml:"managementServerIp,omitempty"` - MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` - Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty"` - TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Hardware *HostHardwareSummary `xml:"hardware,omitempty"` + Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` + Config HostConfigSummary `xml:"config"` + QuickStats HostListSummaryQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + RebootRequired bool `xml:"rebootRequired"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` + ManagementServerIp string `xml:"managementServerIp,omitempty"` + MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty"` + Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty"` + TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty"` + TrustAuthorityAttestationInfos []HostTrustAuthorityAttestationInfo `xml:"trustAuthorityAttestationInfos,omitempty"` } func init() { @@ -22446,6 +24262,7 @@ type HostMaintenanceSpec struct { DynamicData VsanMode *VsanHostDecommissionMode `xml:"vsanMode,omitempty"` + Purpose string `xml:"purpose,omitempty"` } func init() { @@ -22468,6 +24285,8 @@ type HostMemberRuntimeInfo struct { Host ManagedObjectReference `xml:"host"` Status string `xml:"status,omitempty"` StatusDetail string `xml:"statusDetail,omitempty"` + NsxtStatus string `xml:"nsxtStatus,omitempty"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty"` HealthCheckResult []BaseHostMemberHealthCheckResult `xml:"healthCheckResult,omitempty,typeattr"` } @@ -22503,6 +24322,19 @@ func init() { t["HostMemorySpec"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem() } +type HostMemoryTierInfo struct { + DynamicData + + Name string `xml:"name"` + Type string `xml:"type"` + Flags []string `xml:"flags,omitempty"` + Size int64 `xml:"size"` +} + +func init() { + t["HostMemoryTierInfo"] = reflect.TypeOf((*HostMemoryTierInfo)(nil)).Elem() +} + type HostMissingNetworksEvent struct { HostDasEvent @@ -22532,6 +24364,7 @@ type HostMountInfo struct { Mounted *bool `xml:"mounted"` Accessible *bool `xml:"accessible"` InaccessibleReason string `xml:"inaccessibleReason,omitempty"` + MountFailedReason string 
`xml:"mountFailedReason,omitempty"` } func init() { @@ -22558,6 +24391,20 @@ func init() { t["HostMultipathInfoFixedLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoFixedLogicalUnitPolicy)(nil)).Elem() } +type HostMultipathInfoHppLogicalUnitPolicy struct { + HostMultipathInfoLogicalUnitPolicy + + Bytes int64 `xml:"bytes,omitempty"` + Iops int64 `xml:"iops,omitempty"` + Path string `xml:"path,omitempty"` + LatencyEvalTime int64 `xml:"latencyEvalTime,omitempty"` + SamplingIosPerPath int64 `xml:"samplingIosPerPath,omitempty"` +} + +func init() { + t["HostMultipathInfoHppLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoHppLogicalUnitPolicy)(nil)).Elem() +} + type HostMultipathInfoLogicalUnit struct { DynamicData @@ -22772,6 +24619,7 @@ type HostNetCapabilities struct { DnsConfigSupported bool `xml:"dnsConfigSupported"` DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported"` IpV6Supported *bool `xml:"ipV6Supported"` + BackupNfcNiocSupported *bool `xml:"backupNfcNiocSupported"` } func init() { @@ -22824,6 +24672,7 @@ type HostNetworkConfig struct { Nat []HostNatServiceConfig `xml:"nat,omitempty"` IpV6Enabled *bool `xml:"ipV6Enabled"` NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty"` + MigrationStatus string `xml:"migrationStatus,omitempty"` } func init() { @@ -22855,23 +24704,27 @@ func init() { type HostNetworkInfo struct { DynamicData - Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` - ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` - Portgroup []HostPortGroup `xml:"portgroup,omitempty"` - Pnic []PhysicalNic `xml:"pnic,omitempty"` - Vnic []HostVirtualNic `xml:"vnic,omitempty"` - ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` - DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` - IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` - ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` - RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` - Dhcp []HostDhcpService `xml:"dhcp,omitempty"` - Nat []HostNatService `xml:"nat,omitempty"` - IpV6Enabled *bool `xml:"ipV6Enabled"` - AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` - NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` - OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` - OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` + Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroup `xml:"portgroup,omitempty"` + Pnic []PhysicalNic `xml:"pnic,omitempty"` + RdmaDevice []HostRdmaDevice `xml:"rdmaDevice,omitempty"` + Vnic []HostVirtualNic `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` + Dhcp []HostDhcpService `xml:"dhcp,omitempty"` + Nat []HostNatService `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` + NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` + OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` + OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` + NsxTransportNodeId string 
`xml:"nsxTransportNodeId,omitempty"` + NvdsToVdsMigrationRequired *bool `xml:"nvdsToVdsMigrationRequired"` + MigrationStatus string `xml:"migrationStatus,omitempty"` } func init() { @@ -22934,6 +24787,16 @@ func init() { t["HostNewNetworkConnectInfo"] = reflect.TypeOf((*HostNewNetworkConnectInfo)(nil)).Elem() } +type HostNfcConnectionInfo struct { + HostDataTransportConnectionInfo + + StreamingMemoryConsumed int64 `xml:"streamingMemoryConsumed,omitempty"` +} + +func init() { + t["HostNfcConnectionInfo"] = reflect.TypeOf((*HostNfcConnectionInfo)(nil)).Elem() +} + type HostNicFailureCriteria struct { DynamicData @@ -23074,6 +24937,7 @@ type HostNumaNode struct { TypeId byte `xml:"typeId"` CpuID []int16 `xml:"cpuID"` + MemorySize int64 `xml:"memorySize,omitempty"` MemoryRangeBegin int64 `xml:"memoryRangeBegin"` MemoryRangeLength int64 `xml:"memoryRangeLength"` PciId []string `xml:"pciId,omitempty"` @@ -23094,13 +24958,206 @@ type HostNumericSensorInfo struct { RateUnits string `xml:"rateUnits,omitempty"` SensorType string `xml:"sensorType"` Id string `xml:"id,omitempty"` + SensorNumber int64 `xml:"sensorNumber,omitempty"` TimeStamp string `xml:"timeStamp,omitempty"` + Fru *HostFru `xml:"fru,omitempty"` } func init() { t["HostNumericSensorInfo"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem() } +type HostNvmeConnectSpec struct { + HostNvmeSpec + + Subnqn string `xml:"subnqn"` + ControllerId int32 `xml:"controllerId,omitempty"` + AdminQueueSize int32 `xml:"adminQueueSize,omitempty"` + KeepAliveTimeout int32 `xml:"keepAliveTimeout,omitempty"` +} + +func init() { + t["HostNvmeConnectSpec"] = reflect.TypeOf((*HostNvmeConnectSpec)(nil)).Elem() +} + +type HostNvmeController struct { + DynamicData + + Key string `xml:"key"` + ControllerNumber int32 `xml:"controllerNumber"` + Subnqn string `xml:"subnqn"` + Name string `xml:"name"` + AssociatedAdapter string `xml:"associatedAdapter"` + TransportType string `xml:"transportType"` + FusedOperationSupported bool `xml:"fusedOperationSupported"` + NumberOfQueues int32 `xml:"numberOfQueues"` + QueueSize int32 `xml:"queueSize"` + AttachedNamespace []HostNvmeNamespace `xml:"attachedNamespace,omitempty"` + VendorId string `xml:"vendorId,omitempty"` + Model string `xml:"model,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` + FirmwareVersion string `xml:"firmwareVersion,omitempty"` +} + +func init() { + t["HostNvmeController"] = reflect.TypeOf((*HostNvmeController)(nil)).Elem() +} + +type HostNvmeDisconnectSpec struct { + DynamicData + + HbaName string `xml:"hbaName"` + Subnqn string `xml:"subnqn,omitempty"` + ControllerNumber int32 `xml:"controllerNumber,omitempty"` +} + +func init() { + t["HostNvmeDisconnectSpec"] = reflect.TypeOf((*HostNvmeDisconnectSpec)(nil)).Elem() +} + +type HostNvmeDiscoverSpec struct { + HostNvmeSpec + + AutoConnect *bool `xml:"autoConnect"` + RootDiscoveryController *bool `xml:"rootDiscoveryController"` +} + +func init() { + t["HostNvmeDiscoverSpec"] = reflect.TypeOf((*HostNvmeDiscoverSpec)(nil)).Elem() +} + +type HostNvmeDiscoveryLog struct { + DynamicData + + Entry []HostNvmeDiscoveryLogEntry `xml:"entry,omitempty"` + Complete bool `xml:"complete"` +} + +func init() { + t["HostNvmeDiscoveryLog"] = reflect.TypeOf((*HostNvmeDiscoveryLog)(nil)).Elem() +} + +type HostNvmeDiscoveryLogEntry struct { + DynamicData + + Subnqn string `xml:"subnqn"` + SubsystemType string `xml:"subsystemType"` + SubsystemPortId int32 `xml:"subsystemPortId"` + ControllerId int32 `xml:"controllerId"` + AdminQueueMaxSize int32 
`xml:"adminQueueMaxSize"` + TransportParameters BaseHostNvmeTransportParameters `xml:"transportParameters,typeattr"` + TransportRequirements string `xml:"transportRequirements"` + Connected bool `xml:"connected"` +} + +func init() { + t["HostNvmeDiscoveryLogEntry"] = reflect.TypeOf((*HostNvmeDiscoveryLogEntry)(nil)).Elem() +} + +type HostNvmeNamespace struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + Id int32 `xml:"id"` + BlockSize int32 `xml:"blockSize"` + CapacityInBlocks int64 `xml:"capacityInBlocks"` +} + +func init() { + t["HostNvmeNamespace"] = reflect.TypeOf((*HostNvmeNamespace)(nil)).Elem() +} + +type HostNvmeOpaqueTransportParameters struct { + HostNvmeTransportParameters + + Trtype string `xml:"trtype"` + Traddr string `xml:"traddr"` + Adrfam string `xml:"adrfam"` + Trsvcid string `xml:"trsvcid"` + Tsas []byte `xml:"tsas"` +} + +func init() { + t["HostNvmeOpaqueTransportParameters"] = reflect.TypeOf((*HostNvmeOpaqueTransportParameters)(nil)).Elem() +} + +type HostNvmeOverFibreChannelParameters struct { + HostNvmeTransportParameters + + NodeWorldWideName int64 `xml:"nodeWorldWideName"` + PortWorldWideName int64 `xml:"portWorldWideName"` +} + +func init() { + t["HostNvmeOverFibreChannelParameters"] = reflect.TypeOf((*HostNvmeOverFibreChannelParameters)(nil)).Elem() +} + +type HostNvmeOverRdmaParameters struct { + HostNvmeTransportParameters + + Address string `xml:"address"` + AddressFamily string `xml:"addressFamily,omitempty"` + PortNumber int32 `xml:"portNumber,omitempty"` +} + +func init() { + t["HostNvmeOverRdmaParameters"] = reflect.TypeOf((*HostNvmeOverRdmaParameters)(nil)).Elem() +} + +type HostNvmeOverTcpParameters struct { + HostNvmeTransportParameters + + Address string `xml:"address"` + PortNumber int32 `xml:"portNumber,omitempty"` + DigestVerification string `xml:"digestVerification,omitempty"` +} + +func init() { + t["HostNvmeOverTcpParameters"] = reflect.TypeOf((*HostNvmeOverTcpParameters)(nil)).Elem() +} + +type HostNvmeSpec struct { + DynamicData + + HbaName string `xml:"hbaName"` + TransportParameters BaseHostNvmeTransportParameters `xml:"transportParameters,typeattr"` +} + +func init() { + t["HostNvmeSpec"] = reflect.TypeOf((*HostNvmeSpec)(nil)).Elem() +} + +type HostNvmeTopology struct { + DynamicData + + Adapter []HostNvmeTopologyInterface `xml:"adapter,omitempty"` +} + +func init() { + t["HostNvmeTopology"] = reflect.TypeOf((*HostNvmeTopology)(nil)).Elem() +} + +type HostNvmeTopologyInterface struct { + DynamicData + + Key string `xml:"key"` + Adapter string `xml:"adapter"` + ConnectedController []HostNvmeController `xml:"connectedController,omitempty"` +} + +func init() { + t["HostNvmeTopologyInterface"] = reflect.TypeOf((*HostNvmeTopologyInterface)(nil)).Elem() +} + +type HostNvmeTransportParameters struct { + DynamicData +} + +func init() { + t["HostNvmeTransportParameters"] = reflect.TypeOf((*HostNvmeTransportParameters)(nil)).Elem() +} + type HostOpaqueNetworkInfo struct { DynamicData @@ -23283,6 +25340,8 @@ type HostPciPassthruConfig struct { Id string `xml:"id"` PassthruEnabled bool `xml:"passthruEnabled"` + ApplyNow *bool `xml:"applyNow"` + HardwareLabel string `xml:"hardwareLabel,omitempty"` } func init() { @@ -23297,12 +25356,29 @@ type HostPciPassthruInfo struct { PassthruEnabled bool `xml:"passthruEnabled"` PassthruCapable bool `xml:"passthruCapable"` PassthruActive bool `xml:"passthruActive"` + HardwareLabel string `xml:"hardwareLabel,omitempty"` } func init() { t["HostPciPassthruInfo"] = 
reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() } +type HostPcieHba struct { + HostHostBusAdapter +} + +func init() { + t["HostPcieHba"] = reflect.TypeOf((*HostPcieHba)(nil)).Elem() +} + +type HostPcieTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostPcieTargetTransport"] = reflect.TypeOf((*HostPcieTargetTransport)(nil)).Elem() +} + type HostPersistentMemoryInfo struct { DynamicData @@ -23738,18 +25814,27 @@ func init() { type HostProxySwitch struct { DynamicData - DvsUuid string `xml:"dvsUuid"` - DvsName string `xml:"dvsName"` - Key string `xml:"key"` - NumPorts int32 `xml:"numPorts"` - ConfigNumPorts int32 `xml:"configNumPorts,omitempty"` - NumPortsAvailable int32 `xml:"numPortsAvailable"` - UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` - Mtu int32 `xml:"mtu,omitempty"` - Pnic []string `xml:"pnic,omitempty"` - Spec HostProxySwitchSpec `xml:"spec"` - HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"` - NetworkReservationSupported *bool `xml:"networkReservationSupported"` + DvsUuid string `xml:"dvsUuid"` + DvsName string `xml:"dvsName"` + Key string `xml:"key"` + NumPorts int32 `xml:"numPorts"` + ConfigNumPorts int32 `xml:"configNumPorts,omitempty"` + NumPortsAvailable int32 `xml:"numPortsAvailable"` + UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` + Mtu int32 `xml:"mtu,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + Spec HostProxySwitchSpec `xml:"spec"` + HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` + NsxtEnabled *bool `xml:"nsxtEnabled"` + EnsEnabled *bool `xml:"ensEnabled"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty"` + NsxUsedUplinkPort []string `xml:"nsxUsedUplinkPort,omitempty"` + NsxtStatus string `xml:"nsxtStatus,omitempty"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty"` + EnsInfo *HostProxySwitchEnsInfo `xml:"ensInfo,omitempty"` + NetworkOffloadingEnabled *bool `xml:"networkOffloadingEnabled"` } func init() { @@ -23768,6 +25853,20 @@ func init() { t["HostProxySwitchConfig"] = reflect.TypeOf((*HostProxySwitchConfig)(nil)).Elem() } +type HostProxySwitchEnsInfo struct { + DynamicData + + OpsVersion int64 `xml:"opsVersion"` + NumPSOps int64 `xml:"numPSOps"` + NumLcoreOps int64 `xml:"numLcoreOps"` + ErrorStatus int64 `xml:"errorStatus"` + LcoreStatus int64 `xml:"lcoreStatus"` +} + +func init() { + t["HostProxySwitchEnsInfo"] = reflect.TypeOf((*HostProxySwitchEnsInfo)(nil)).Elem() +} + type HostProxySwitchHostLagConfig struct { DynamicData @@ -23790,6 +25889,117 @@ func init() { t["HostProxySwitchSpec"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem() } +type HostPtpConfig struct { + DynamicData + + Domain int32 `xml:"domain,omitempty"` + Port []HostPtpConfigPtpPort `xml:"port,omitempty"` +} + +func init() { + t["HostPtpConfig"] = reflect.TypeOf((*HostPtpConfig)(nil)).Elem() +} + +type HostPtpConfigPtpPort struct { + DynamicData + + Index int32 `xml:"index"` + DeviceType string `xml:"deviceType,omitempty"` + Device string `xml:"device,omitempty"` + IpConfig *HostIpConfig `xml:"ipConfig,omitempty"` +} + +func init() { + t["HostPtpConfigPtpPort"] = reflect.TypeOf((*HostPtpConfigPtpPort)(nil)).Elem() +} + +type HostQualifiedName struct { + DynamicData + + Value string `xml:"value"` + Type string `xml:"type"` +} + +func init() { + t["HostQualifiedName"] = reflect.TypeOf((*HostQualifiedName)(nil)).Elem() +} + +type 
HostRdmaDevice struct { + DynamicData + + Key string `xml:"key"` + Device string `xml:"device"` + Driver string `xml:"driver,omitempty"` + Description string `xml:"description,omitempty"` + Backing BaseHostRdmaDeviceBacking `xml:"backing,omitempty,typeattr"` + ConnectionInfo HostRdmaDeviceConnectionInfo `xml:"connectionInfo"` + Capability HostRdmaDeviceCapability `xml:"capability"` +} + +func init() { + t["HostRdmaDevice"] = reflect.TypeOf((*HostRdmaDevice)(nil)).Elem() +} + +type HostRdmaDeviceBacking struct { + DynamicData +} + +func init() { + t["HostRdmaDeviceBacking"] = reflect.TypeOf((*HostRdmaDeviceBacking)(nil)).Elem() +} + +type HostRdmaDeviceCapability struct { + DynamicData + + RoceV1Capable bool `xml:"roceV1Capable"` + RoceV2Capable bool `xml:"roceV2Capable"` + IWarpCapable bool `xml:"iWarpCapable"` +} + +func init() { + t["HostRdmaDeviceCapability"] = reflect.TypeOf((*HostRdmaDeviceCapability)(nil)).Elem() +} + +type HostRdmaDeviceConnectionInfo struct { + DynamicData + + State string `xml:"state"` + Mtu int32 `xml:"mtu"` + SpeedInMbps int32 `xml:"speedInMbps"` +} + +func init() { + t["HostRdmaDeviceConnectionInfo"] = reflect.TypeOf((*HostRdmaDeviceConnectionInfo)(nil)).Elem() +} + +type HostRdmaDevicePnicBacking struct { + HostRdmaDeviceBacking + + PairedUplink string `xml:"pairedUplink"` +} + +func init() { + t["HostRdmaDevicePnicBacking"] = reflect.TypeOf((*HostRdmaDevicePnicBacking)(nil)).Elem() +} + +type HostRdmaHba struct { + HostHostBusAdapter + + AssociatedRdmaDevice string `xml:"associatedRdmaDevice,omitempty"` +} + +func init() { + t["HostRdmaHba"] = reflect.TypeOf((*HostRdmaHba)(nil)).Elem() +} + +type HostRdmaTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostRdmaTargetTransport"] = reflect.TypeOf((*HostRdmaTargetTransport)(nil)).Elem() +} + type HostReconcileDatastoreInventoryRequestType struct { This ManagedObjectReference `xml:"_this"` Datastore ManagedObjectReference `xml:"datastore"` @@ -23949,10 +26159,55 @@ func init() { t["HostRetrieveVStorageObject"] = reflect.TypeOf((*HostRetrieveVStorageObject)(nil)).Elem() } +type HostRetrieveVStorageObjectMetadata HostRetrieveVStorageObjectMetadataRequestType + +func init() { + t["HostRetrieveVStorageObjectMetadata"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadata)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId *ID `xml:"snapshotId,omitempty"` + Prefix string `xml:"prefix,omitempty"` +} + +func init() { + t["HostRetrieveVStorageObjectMetadataRequestType"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadataRequestType)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataResponse struct { + Returnval []KeyValue `xml:"returnval,omitempty"` +} + +type HostRetrieveVStorageObjectMetadataValue HostRetrieveVStorageObjectMetadataValueRequestType + +func init() { + t["HostRetrieveVStorageObjectMetadataValue"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadataValue)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataValueRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId *ID `xml:"snapshotId,omitempty"` + Key string `xml:"key"` +} + +func init() { + t["HostRetrieveVStorageObjectMetadataValueRequestType"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadataValueRequestType)(nil)).Elem() +} + +type 
HostRetrieveVStorageObjectMetadataValueResponse struct { + Returnval string `xml:"returnval"` +} + type HostRetrieveVStorageObjectRequestType struct { - This ManagedObjectReference `xml:"_this"` - Id ID `xml:"id"` - Datastore ManagedObjectReference `xml:"datastore"` + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + DiskInfoFlags []string `xml:"diskInfoFlags,omitempty"` } func init() { @@ -23986,21 +26241,23 @@ type HostRetrieveVStorageObjectStateResponse struct { type HostRuntimeInfo struct { DynamicData - ConnectionState HostSystemConnectionState `xml:"connectionState"` - PowerState HostSystemPowerState `xml:"powerState"` - StandbyMode string `xml:"standbyMode,omitempty"` - InMaintenanceMode bool `xml:"inMaintenanceMode"` - InQuarantineMode *bool `xml:"inQuarantineMode"` - BootTime *time.Time `xml:"bootTime"` - HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"` - DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"` - TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"` - VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"` - NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"` - VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"` - HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"` - CryptoState string `xml:"cryptoState,omitempty"` - CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty"` + ConnectionState HostSystemConnectionState `xml:"connectionState"` + PowerState HostSystemPowerState `xml:"powerState"` + StandbyMode string `xml:"standbyMode,omitempty"` + InMaintenanceMode bool `xml:"inMaintenanceMode"` + InQuarantineMode *bool `xml:"inQuarantineMode"` + BootTime *time.Time `xml:"bootTime"` + HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"` + DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"` + TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"` + VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"` + NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"` + VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"` + HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"` + CryptoState string `xml:"cryptoState,omitempty"` + CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty"` + StatelessNvdsMigrationReady string `xml:"statelessNvdsMigrationReady,omitempty"` + StateEncryption *HostRuntimeInfoStateEncryptionInfo `xml:"stateEncryption,omitempty"` } func init() { @@ -24032,6 +26289,18 @@ func init() { t["HostRuntimeInfoNetworkRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem() } +type HostRuntimeInfoStateEncryptionInfo struct { + DynamicData + + ProtectionMode string `xml:"protectionMode"` + RequireSecureBoot *bool `xml:"requireSecureBoot"` + RequireExecInstalledOnly *bool `xml:"requireExecInstalledOnly"` +} + +func init() { + t["HostRuntimeInfoStateEncryptionInfo"] = reflect.TypeOf((*HostRuntimeInfoStateEncryptionInfo)(nil)).Elem() +} + type HostScheduleReconcileDatastoreInventory HostScheduleReconcileDatastoreInventoryRequestType func init() { @@ -24239,6 +26508,46 @@ func init() { type HostSetVStorageObjectControlFlagsResponse struct { } +type HostSevInfo struct { + DynamicData + + SevState string 
`xml:"sevState"` + MaxSevEsGuests int64 `xml:"maxSevEsGuests"` +} + +func init() { + t["HostSevInfo"] = reflect.TypeOf((*HostSevInfo)(nil)).Elem() +} + +type HostSgxInfo struct { + DynamicData + + SgxState string `xml:"sgxState"` + TotalEpcMemory int64 `xml:"totalEpcMemory"` + FlcMode string `xml:"flcMode"` + LePubKeyHash string `xml:"lePubKeyHash,omitempty"` + RegistrationInfo *HostSgxRegistrationInfo `xml:"registrationInfo,omitempty"` +} + +func init() { + t["HostSgxInfo"] = reflect.TypeOf((*HostSgxInfo)(nil)).Elem() +} + +type HostSgxRegistrationInfo struct { + DynamicData + + Status string `xml:"status,omitempty"` + BiosError int32 `xml:"biosError,omitempty"` + RegistrationUrl string `xml:"registrationUrl,omitempty"` + Type string `xml:"type,omitempty"` + Ppid string `xml:"ppid,omitempty"` + LastRegisteredTime *time.Time `xml:"lastRegisteredTime"` +} + +func init() { + t["HostSgxRegistrationInfo"] = reflect.TypeOf((*HostSgxRegistrationInfo)(nil)).Elem() +} + type HostSharedGpuCapabilities struct { DynamicData @@ -24358,6 +26667,14 @@ func init() { t["HostSpecification"] = reflect.TypeOf((*HostSpecification)(nil)).Elem() } +type HostSpecificationChangedEvent struct { + HostEvent +} + +func init() { + t["HostSpecificationChangedEvent"] = reflect.TypeOf((*HostSpecificationChangedEvent)(nil)).Elem() +} + type HostSpecificationOperationFailed struct { VimFault @@ -24374,6 +26691,24 @@ func init() { t["HostSpecificationOperationFailedFault"] = reflect.TypeOf((*HostSpecificationOperationFailedFault)(nil)).Elem() } +type HostSpecificationRequireEvent struct { + HostEvent +} + +func init() { + t["HostSpecificationRequireEvent"] = reflect.TypeOf((*HostSpecificationRequireEvent)(nil)).Elem() +} + +type HostSpecificationUpdateEvent struct { + HostEvent + + HostSpec HostSpecification `xml:"hostSpec"` +} + +func init() { + t["HostSpecificationUpdateEvent"] = reflect.TypeOf((*HostSpecificationUpdateEvent)(nil)).Elem() +} + type HostSriovConfig struct { HostPciPassthruConfig @@ -24458,6 +26793,7 @@ type HostStorageDeviceInfo struct { HostBusAdapter []BaseHostHostBusAdapter `xml:"hostBusAdapter,omitempty,typeattr"` ScsiLun []BaseScsiLun `xml:"scsiLun,omitempty,typeattr"` ScsiTopology *HostScsiTopology `xml:"scsiTopology,omitempty"` + NvmeTopology *HostNvmeTopology `xml:"nvmeTopology,omitempty"` MultipathInfo *HostMultipathInfo `xml:"multipathInfo,omitempty"` PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty"` SoftwareInternetScsiEnabled bool `xml:"softwareInternetScsiEnabled"` @@ -24534,6 +26870,26 @@ func init() { t["HostSubSpecification"] = reflect.TypeOf((*HostSubSpecification)(nil)).Elem() } +type HostSubSpecificationDeleteEvent struct { + HostEvent + + SubSpecName string `xml:"subSpecName"` +} + +func init() { + t["HostSubSpecificationDeleteEvent"] = reflect.TypeOf((*HostSubSpecificationDeleteEvent)(nil)).Elem() +} + +type HostSubSpecificationUpdateEvent struct { + HostEvent + + HostSubSpec HostSubSpecification `xml:"hostSubSpec"` +} + +func init() { + t["HostSubSpecificationUpdateEvent"] = reflect.TypeOf((*HostSubSpecificationUpdateEvent)(nil)).Elem() +} + type HostSyncFailedEvent struct { HostEvent @@ -24584,6 +26940,9 @@ type HostSystemInfo struct { Uuid string `xml:"uuid"` OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"` SerialNumber string `xml:"serialNumber,omitempty"` + QualifiedName []HostQualifiedName `xml:"qualifiedName,omitempty"` + VvolHostNQN *HostQualifiedName `xml:"vvolHostNQN,omitempty"` + VvolHostId string 
`xml:"vvolHostId,omitempty"` } func init() { @@ -24685,6 +27044,34 @@ func init() { t["HostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() } +type HostTcpHba struct { + HostHostBusAdapter + + AssociatedPnic string `xml:"associatedPnic,omitempty"` +} + +func init() { + t["HostTcpHba"] = reflect.TypeOf((*HostTcpHba)(nil)).Elem() +} + +type HostTcpHbaCreateSpec struct { + HostHbaCreateSpec + + Pnic string `xml:"pnic"` +} + +func init() { + t["HostTcpHbaCreateSpec"] = reflect.TypeOf((*HostTcpHbaCreateSpec)(nil)).Elem() +} + +type HostTcpTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostTcpTargetTransport"] = reflect.TypeOf((*HostTcpTargetTransport)(nil)).Elem() +} + type HostTpmAttestationInfo struct { DynamicData @@ -24761,6 +27148,14 @@ func init() { t["HostTpmEventLogEntry"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem() } +type HostTpmNvTagEventDetails struct { + HostTpmBootSecurityOptionEventDetails +} + +func init() { + t["HostTpmNvTagEventDetails"] = reflect.TypeOf((*HostTpmNvTagEventDetails)(nil)).Elem() +} + type HostTpmOptionEventDetails struct { HostTpmEventDetails @@ -24772,6 +27167,14 @@ func init() { t["HostTpmOptionEventDetails"] = reflect.TypeOf((*HostTpmOptionEventDetails)(nil)).Elem() } +type HostTpmSignerEventDetails struct { + HostTpmBootSecurityOptionEventDetails +} + +func init() { + t["HostTpmSignerEventDetails"] = reflect.TypeOf((*HostTpmSignerEventDetails)(nil)).Elem() +} + type HostTpmSoftwareComponentEventDetails struct { HostTpmEventDetails @@ -24785,6 +27188,30 @@ func init() { t["HostTpmSoftwareComponentEventDetails"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem() } +type HostTpmVersionEventDetails struct { + HostTpmEventDetails + + Version []byte `xml:"version"` +} + +func init() { + t["HostTpmVersionEventDetails"] = reflect.TypeOf((*HostTpmVersionEventDetails)(nil)).Elem() +} + +type HostTrustAuthorityAttestationInfo struct { + DynamicData + + AttestationStatus string `xml:"attestationStatus"` + ServiceId string `xml:"serviceId,omitempty"` + AttestedAt *time.Time `xml:"attestedAt"` + AttestedUntil *time.Time `xml:"attestedUntil"` + Messages []LocalizableMessage `xml:"messages,omitempty"` +} + +func init() { + t["HostTrustAuthorityAttestationInfo"] = reflect.TypeOf((*HostTrustAuthorityAttestationInfo)(nil)).Elem() +} + type HostUnresolvedVmfsExtent struct { DynamicData @@ -24861,6 +27288,50 @@ func init() { t["HostUnresolvedVmfsVolumeResolveStatus"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem() } +type HostUpdateVStorageObjectMetadataExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata []KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["HostUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadataEx_Task HostUpdateVStorageObjectMetadataExRequestType + +func init() { + t["HostUpdateVStorageObjectMetadataEx_Task"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataEx_Task)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadataEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostUpdateVStorageObjectMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata 
[]KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["HostUpdateVStorageObjectMetadataRequestType"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataRequestType)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadata_Task HostUpdateVStorageObjectMetadataRequestType + +func init() { + t["HostUpdateVStorageObjectMetadata_Task"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadata_Task)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadata_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type HostUpgradeFailedEvent struct { HostEvent } @@ -25008,6 +27479,39 @@ func init() { t["HostVMotionInfo"] = reflect.TypeOf((*HostVMotionInfo)(nil)).Elem() } +type HostVMotionManagerDstInstantCloneResult struct { + DynamicData + + DstVmId int32 `xml:"dstVmId,omitempty"` + StartTime int64 `xml:"startTime,omitempty"` + CptLoadTime int64 `xml:"cptLoadTime,omitempty"` + CptLoadDoneTime int64 `xml:"cptLoadDoneTime,omitempty"` + ReplicateMemDoneTime int64 `xml:"replicateMemDoneTime,omitempty"` + EndTime int64 `xml:"endTime,omitempty"` + CptXferTime int64 `xml:"cptXferTime,omitempty"` + CptCacheUsed int64 `xml:"cptCacheUsed,omitempty"` + DevCptStreamSize int64 `xml:"devCptStreamSize,omitempty"` + DevCptStreamTime int64 `xml:"devCptStreamTime,omitempty"` +} + +func init() { + t["HostVMotionManagerDstInstantCloneResult"] = reflect.TypeOf((*HostVMotionManagerDstInstantCloneResult)(nil)).Elem() +} + +type HostVMotionManagerSrcInstantCloneResult struct { + DynamicData + + StartTime int64 `xml:"startTime,omitempty"` + QuiesceTime int64 `xml:"quiesceTime,omitempty"` + QuiesceDoneTime int64 `xml:"quiesceDoneTime,omitempty"` + ResumeDoneTime int64 `xml:"resumeDoneTime,omitempty"` + EndTime int64 `xml:"endTime,omitempty"` +} + +func init() { + t["HostVMotionManagerSrcInstantCloneResult"] = reflect.TypeOf((*HostVMotionManagerSrcInstantCloneResult)(nil)).Elem() +} + type HostVMotionNetConfig struct { DynamicData @@ -25020,14 +27524,15 @@ func init() { } type HostVStorageObjectCreateDiskFromSnapshotRequestType struct { - This ManagedObjectReference `xml:"_this"` - Id ID `xml:"id"` - Datastore ManagedObjectReference `xml:"datastore"` - SnapshotId ID `xml:"snapshotId"` - Name string `xml:"name"` - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` - Path string `xml:"path,omitempty"` + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` + Name string `xml:"name"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + Path string `xml:"path,omitempty"` + ProvisioningType string `xml:"provisioningType,omitempty"` } func init() { @@ -25256,6 +27761,8 @@ type HostVirtualNicSpec struct { ExternalId string `xml:"externalId,omitempty"` PinnedPnic string `xml:"pinnedPnic,omitempty"` IpRouteSpec *HostVirtualNicIpRouteSpec `xml:"ipRouteSpec,omitempty"` + SystemOwned *bool `xml:"systemOwned"` + DpuId string `xml:"dpuId,omitempty"` } func init() { @@ -25479,10 +27986,11 @@ func init() { type HostVvolVolume struct { HostFileSystemVolume - ScId string `xml:"scId"` - HostPE []VVolHostPE `xml:"hostPE,omitempty"` - VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"` - StorageArray []VASAStorageArray `xml:"storageArray,omitempty"` + ScId string `xml:"scId"` + HostPE 
[]VVolHostPE `xml:"hostPE,omitempty"` + VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"` + StorageArray []VASAStorageArray `xml:"storageArray,omitempty"` + ProtocolEndpointType string `xml:"protocolEndpointType,omitempty"` } func init() { @@ -25704,6 +28212,36 @@ func init() { t["HttpNfcLeaseManifestEntry"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem() } +type HttpNfcLeaseProbeResult struct { + DynamicData + + ServerAccessible bool `xml:"serverAccessible"` +} + +func init() { + t["HttpNfcLeaseProbeResult"] = reflect.TypeOf((*HttpNfcLeaseProbeResult)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrls HttpNfcLeaseProbeUrlsRequestType + +func init() { + t["HttpNfcLeaseProbeUrls"] = reflect.TypeOf((*HttpNfcLeaseProbeUrls)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrlsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Files []HttpNfcLeaseSourceFile `xml:"files,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` +} + +func init() { + t["HttpNfcLeaseProbeUrlsRequestType"] = reflect.TypeOf((*HttpNfcLeaseProbeUrlsRequestType)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrlsResponse struct { + Returnval []HttpNfcLeaseProbeResult `xml:"returnval,omitempty"` +} + type HttpNfcLeaseProgress HttpNfcLeaseProgressRequestType func init() { @@ -27319,7 +29857,7 @@ func init() { type InvalidOperationOnSecondaryVm struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -27716,6 +30254,25 @@ func init() { t["IpRouteProfile"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem() } +type IsKmsClusterActive IsKmsClusterActiveRequestType + +func init() { + t["IsKmsClusterActive"] = reflect.TypeOf((*IsKmsClusterActive)(nil)).Elem() +} + +type IsKmsClusterActiveRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster *KeyProviderId `xml:"cluster,omitempty"` +} + +func init() { + t["IsKmsClusterActiveRequestType"] = reflect.TypeOf((*IsKmsClusterActiveRequestType)(nil)).Elem() +} + +type IsKmsClusterActiveResponse struct { + Returnval bool `xml:"returnval"` +} + type IsSharedGraphicsActive IsSharedGraphicsActiveRequestType func init() { @@ -28083,6 +30640,22 @@ func init() { t["KeyAnyValue"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem() } +type KeyNotFound struct { + VimFault + + Key string `xml:"key"` +} + +func init() { + t["KeyNotFound"] = reflect.TypeOf((*KeyNotFound)(nil)).Elem() +} + +type KeyNotFoundFault KeyNotFound + +func init() { + t["KeyNotFoundFault"] = reflect.TypeOf((*KeyNotFoundFault)(nil)).Elem() +} + type KeyProviderId struct { DynamicData @@ -28107,9 +30680,14 @@ func init() { type KmipClusterInfo struct { DynamicData - ClusterId KeyProviderId `xml:"clusterId"` - Servers []KmipServerInfo `xml:"servers,omitempty"` - UseAsDefault bool `xml:"useAsDefault"` + ClusterId KeyProviderId `xml:"clusterId"` + Servers []KmipServerInfo `xml:"servers,omitempty"` + UseAsDefault bool `xml:"useAsDefault"` + ManagementType string `xml:"managementType,omitempty"` + UseAsEntityDefault []ManagedObjectReference `xml:"useAsEntityDefault,omitempty"` + HasBackup *bool `xml:"hasBackup"` + TpmRequired *bool `xml:"tpmRequired"` + KeyId string `xml:"keyId,omitempty"` } func init() { @@ -28754,6 +31332,27 @@ type ListKmipServersResponse struct { Returnval []KmipClusterInfo `xml:"returnval,omitempty"` } +type ListKmsClusters ListKmsClustersRequestType + +func init() { + t["ListKmsClusters"] = reflect.TypeOf((*ListKmsClusters)(nil)).Elem() +} + +type ListKmsClustersRequestType struct { 
+ This ManagedObjectReference `xml:"_this"` + IncludeKmsServers *bool `xml:"includeKmsServers"` + ManagementTypeFilter int32 `xml:"managementTypeFilter,omitempty"` + StatusFilter int32 `xml:"statusFilter,omitempty"` +} + +func init() { + t["ListKmsClustersRequestType"] = reflect.TypeOf((*ListKmsClustersRequestType)(nil)).Elem() +} + +type ListKmsClustersResponse struct { + Returnval []KmipClusterInfo `xml:"returnval,omitempty"` +} + type ListProcessesInGuest ListProcessesInGuestRequestType func init() { @@ -29495,6 +32094,63 @@ func init() { type MarkForRemovalResponse struct { } +type MarkPerenniallyReserved MarkPerenniallyReservedRequestType + +func init() { + t["MarkPerenniallyReserved"] = reflect.TypeOf((*MarkPerenniallyReserved)(nil)).Elem() +} + +type MarkPerenniallyReservedExRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid []string `xml:"lunUuid,omitempty"` + State bool `xml:"state"` +} + +func init() { + t["MarkPerenniallyReservedExRequestType"] = reflect.TypeOf((*MarkPerenniallyReservedExRequestType)(nil)).Elem() +} + +type MarkPerenniallyReservedEx_Task MarkPerenniallyReservedExRequestType + +func init() { + t["MarkPerenniallyReservedEx_Task"] = reflect.TypeOf((*MarkPerenniallyReservedEx_Task)(nil)).Elem() +} + +type MarkPerenniallyReservedEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkPerenniallyReservedRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` + State bool `xml:"state"` +} + +func init() { + t["MarkPerenniallyReservedRequestType"] = reflect.TypeOf((*MarkPerenniallyReservedRequestType)(nil)).Elem() +} + +type MarkPerenniallyReservedResponse struct { +} + +type MarkServiceProviderEntities MarkServiceProviderEntitiesRequestType + +func init() { + t["MarkServiceProviderEntities"] = reflect.TypeOf((*MarkServiceProviderEntities)(nil)).Elem() +} + +type MarkServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["MarkServiceProviderEntitiesRequestType"] = reflect.TypeOf((*MarkServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type MarkServiceProviderEntitiesResponse struct { +} + type MemoryFileFormatNotSupportedByDatastore struct { UnsupportedDatastore @@ -31006,6 +33662,7 @@ type NetworkProfile struct { DvsHostNic []DvsHostVNicProfile `xml:"dvsHostNic,omitempty"` NsxHostNic []NsxHostVNicProfile `xml:"nsxHostNic,omitempty"` NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty"` + OpaqueSwitch *OpaqueSwitchProfile `xml:"opaqueSwitch,omitempty"` } func init() { @@ -31037,8 +33694,8 @@ type NetworkSummary struct { Network *ManagedObjectReference `xml:"network,omitempty"` Name string `xml:"name"` Accessible bool `xml:"accessible"` - IpPoolName string `xml:"ipPoolName,omitempty"` - IpPoolId int32 `xml:"ipPoolId,omitempty"` + IpPoolName string `xml:"ipPoolName"` + IpPoolId *int32 `xml:"ipPoolId"` } func init() { @@ -31387,14 +34044,26 @@ func init() { type NoPermission struct { SecurityError - Object ManagedObjectReference `xml:"object"` - PrivilegeId string `xml:"privilegeId"` + Object *ManagedObjectReference `xml:"object,omitempty"` + PrivilegeId string `xml:"privilegeId,omitempty"` + MissingPrivileges []NoPermissionEntityPrivileges `xml:"missingPrivileges,omitempty"` } func init() { t["NoPermission"] = reflect.TypeOf((*NoPermission)(nil)).Elem() } +type NoPermissionEntityPrivileges struct { + DynamicData + + Entity 
ManagedObjectReference `xml:"entity"` + PrivilegeIds []string `xml:"privilegeIds,omitempty"` +} + +func init() { + t["NoPermissionEntityPrivileges"] = reflect.TypeOf((*NoPermissionEntityPrivileges)(nil)).Elem() +} + type NoPermissionFault BaseNoPermission func init() { @@ -32101,6 +34770,22 @@ func init() { t["NvdimmNamespaceDeleteSpec"] = reflect.TypeOf((*NvdimmNamespaceDeleteSpec)(nil)).Elem() } +type NvdimmNamespaceDetails struct { + DynamicData + + Uuid string `xml:"uuid"` + FriendlyName string `xml:"friendlyName"` + Size int64 `xml:"size"` + Type string `xml:"type"` + NamespaceHealthStatus string `xml:"namespaceHealthStatus"` + InterleavesetID int32 `xml:"interleavesetID"` + State string `xml:"state"` +} + +func init() { + t["NvdimmNamespaceDetails"] = reflect.TypeOf((*NvdimmNamespaceDetails)(nil)).Elem() +} + type NvdimmNamespaceInfo struct { DynamicData @@ -32118,6 +34803,18 @@ func init() { t["NvdimmNamespaceInfo"] = reflect.TypeOf((*NvdimmNamespaceInfo)(nil)).Elem() } +type NvdimmPMemNamespaceCreateSpec struct { + DynamicData + + FriendlyName string `xml:"friendlyName,omitempty"` + Size int64 `xml:"size"` + InterleavesetID int32 `xml:"interleavesetID"` +} + +func init() { + t["NvdimmPMemNamespaceCreateSpec"] = reflect.TypeOf((*NvdimmPMemNamespaceCreateSpec)(nil)).Elem() +} + type NvdimmRegionInfo struct { DynamicData @@ -32160,6 +34857,7 @@ type NvdimmSystemInfo struct { ISetInfo []NvdimmInterleaveSetInfo `xml:"iSetInfo,omitempty"` Namespace []NvdimmGuid `xml:"namespace,omitempty"` NsInfo []NvdimmNamespaceInfo `xml:"nsInfo,omitempty"` + NsDetails []NvdimmNamespaceDetails `xml:"nsDetails,omitempty"` } func init() { @@ -32245,6 +34943,14 @@ func init() { t["OpaqueNetworkTargetInfo"] = reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem() } +type OpaqueSwitchProfile struct { + ApplyProfile +} + +func init() { + t["OpaqueSwitchProfile"] = reflect.TypeOf((*OpaqueSwitchProfile)(nil)).Elem() +} + type OpenInventoryViewFolder OpenInventoryViewFolderRequestType func init() { @@ -33972,6 +36678,20 @@ func init() { t["PassiveNodeNetworkSpec"] = reflect.TypeOf((*PassiveNodeNetworkSpec)(nil)).Elem() } +type PasswordExpired struct { + InvalidLogin +} + +func init() { + t["PasswordExpired"] = reflect.TypeOf((*PasswordExpired)(nil)).Elem() +} + +type PasswordExpiredFault PasswordExpired + +func init() { + t["PasswordExpiredFault"] = reflect.TypeOf((*PasswordExpiredFault)(nil)).Elem() +} + type PasswordField struct { DynamicData @@ -34479,6 +37199,8 @@ type PhysicalNic struct { Device string `xml:"device"` Pci string `xml:"pci"` Driver string `xml:"driver,omitempty"` + DriverVersion string `xml:"driverVersion,omitempty"` + FirmwareVersion string `xml:"firmwareVersion,omitempty"` LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` ValidLinkSpecification []PhysicalNicLinkInfo `xml:"validLinkSpecification,omitempty"` Spec PhysicalNicSpec `xml:"spec"` @@ -34491,6 +37213,9 @@ type PhysicalNic struct { ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty"` AutoNegotiateSupported *bool `xml:"autoNegotiateSupported"` EnhancedNetworkingStackSupported *bool `xml:"enhancedNetworkingStackSupported"` + EnsInterruptSupported *bool `xml:"ensInterruptSupported"` + RdmaDevice string `xml:"rdmaDevice,omitempty"` + DpuId string `xml:"dpuId,omitempty"` } func init() { @@ -34623,6 +37348,7 @@ type PhysicalNicSpec struct { Ip *HostIpConfig `xml:"ip,omitempty"` LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` EnableEnhancedNetworkingStack *bool 
`xml:"enableEnhancedNetworkingStack"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` } func init() { @@ -35909,6 +38635,26 @@ type QueryCompatibleHostForNewDvsResponse struct { Returnval []ManagedObjectReference `xml:"returnval,omitempty"` } +type QueryCompatibleVmnicsFromHosts QueryCompatibleVmnicsFromHostsRequestType + +func init() { + t["QueryCompatibleVmnicsFromHosts"] = reflect.TypeOf((*QueryCompatibleVmnicsFromHosts)(nil)).Elem() +} + +type QueryCompatibleVmnicsFromHostsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` + Dvs ManagedObjectReference `xml:"dvs"` +} + +func init() { + t["QueryCompatibleVmnicsFromHostsRequestType"] = reflect.TypeOf((*QueryCompatibleVmnicsFromHostsRequestType)(nil)).Elem() +} + +type QueryCompatibleVmnicsFromHostsResponse struct { + Returnval []DVSManagerPhysicalNicsList `xml:"returnval,omitempty"` +} + type QueryComplianceStatus QueryComplianceStatusRequestType func init() { @@ -36066,6 +38812,44 @@ type QueryConnectionInfoViaSpecResponse struct { Returnval HostConnectInfo `xml:"returnval"` } +type QueryConnections QueryConnectionsRequestType + +func init() { + t["QueryConnections"] = reflect.TypeOf((*QueryConnections)(nil)).Elem() +} + +type QueryConnectionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryConnectionsRequestType"] = reflect.TypeOf((*QueryConnectionsRequestType)(nil)).Elem() +} + +type QueryConnectionsResponse struct { + Returnval []BaseVirtualMachineConnection `xml:"returnval,omitempty,typeattr"` +} + +type QueryCryptoKeyStatus QueryCryptoKeyStatusRequestType + +func init() { + t["QueryCryptoKeyStatus"] = reflect.TypeOf((*QueryCryptoKeyStatus)(nil)).Elem() +} + +type QueryCryptoKeyStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyIds []CryptoKeyId `xml:"keyIds,omitempty"` + CheckKeyBitMap int32 `xml:"checkKeyBitMap"` +} + +func init() { + t["QueryCryptoKeyStatusRequestType"] = reflect.TypeOf((*QueryCryptoKeyStatusRequestType)(nil)).Elem() +} + +type QueryCryptoKeyStatusResponse struct { + Returnval []CryptoManagerKmipCryptoKeyStatus `xml:"returnval,omitempty"` +} + type QueryDatastorePerformanceSummary QueryDatastorePerformanceSummaryRequestType func init() { @@ -36561,6 +39345,25 @@ type QueryHostStatusResponse struct { Returnval VsanHostClusterStatus `xml:"returnval"` } +type QueryHostsWithAttachedLun QueryHostsWithAttachedLunRequestType + +func init() { + t["QueryHostsWithAttachedLun"] = reflect.TypeOf((*QueryHostsWithAttachedLun)(nil)).Elem() +} + +type QueryHostsWithAttachedLunRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` +} + +func init() { + t["QueryHostsWithAttachedLunRequestType"] = reflect.TypeOf((*QueryHostsWithAttachedLunRequestType)(nil)).Elem() +} + +type QueryHostsWithAttachedLunResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + type QueryIORMConfigOption QueryIORMConfigOptionRequestType func init() { @@ -36734,6 +39537,25 @@ type QueryManagedByResponse struct { Returnval []ManagedObjectReference `xml:"returnval,omitempty"` } +type QueryMaxQueueDepth QueryMaxQueueDepthRequestType + +func init() { + t["QueryMaxQueueDepth"] = reflect.TypeOf((*QueryMaxQueueDepth)(nil)).Elem() +} + +type QueryMaxQueueDepthRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["QueryMaxQueueDepthRequestType"] = 
reflect.TypeOf((*QueryMaxQueueDepthRequestType)(nil)).Elem() +} + +type QueryMaxQueueDepthResponse struct { + Returnval int64 `xml:"returnval"` +} + type QueryMemoryOverhead QueryMemoryOverheadRequestType func init() { @@ -37135,6 +39957,24 @@ type QueryPolicyMetadataResponse struct { Returnval []ProfilePolicyMetadata `xml:"returnval,omitempty"` } +type QueryProductLockerLocation QueryProductLockerLocationRequestType + +func init() { + t["QueryProductLockerLocation"] = reflect.TypeOf((*QueryProductLockerLocation)(nil)).Elem() +} + +type QueryProductLockerLocationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryProductLockerLocationRequestType"] = reflect.TypeOf((*QueryProductLockerLocationRequestType)(nil)).Elem() +} + +type QueryProductLockerLocationResponse struct { + Returnval string `xml:"returnval"` +} + type QueryProfileStructure QueryProfileStructureRequestType func init() { @@ -37266,6 +40106,25 @@ type QuerySupportedFeaturesResponse struct { Returnval []LicenseFeatureInfo `xml:"returnval,omitempty"` } +type QuerySupportedNetworkOffloadSpec QuerySupportedNetworkOffloadSpecRequestType + +func init() { + t["QuerySupportedNetworkOffloadSpec"] = reflect.TypeOf((*QuerySupportedNetworkOffloadSpec)(nil)).Elem() +} + +type QuerySupportedNetworkOffloadSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchProductSpec DistributedVirtualSwitchProductSpec `xml:"switchProductSpec"` +} + +func init() { + t["QuerySupportedNetworkOffloadSpecRequestType"] = reflect.TypeOf((*QuerySupportedNetworkOffloadSpecRequestType)(nil)).Elem() +} + +type QuerySupportedNetworkOffloadSpecResponse struct { + Returnval []DistributedVirtualSwitchNetworkOffloadSpec `xml:"returnval,omitempty"` +} + type QuerySyncingVsanObjects QuerySyncingVsanObjectsRequestType func init() { @@ -38860,6 +41719,25 @@ func init() { type RegisterKmipServerResponse struct { } +type RegisterKmsCluster RegisterKmsClusterRequestType + +func init() { + t["RegisterKmsCluster"] = reflect.TypeOf((*RegisterKmsCluster)(nil)).Elem() +} + +type RegisterKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` + ManagementType string `xml:"managementType,omitempty"` +} + +func init() { + t["RegisterKmsClusterRequestType"] = reflect.TypeOf((*RegisterKmsClusterRequestType)(nil)).Elem() +} + +type RegisterKmsClusterResponse struct { +} + type RegisterVMRequestType struct { This ManagedObjectReference `xml:"_this"` Path string `xml:"path"` @@ -39345,6 +42223,7 @@ type RemoveInternetScsiSendTargetsRequestType struct { This ManagedObjectReference `xml:"_this"` IScsiHbaDevice string `xml:"iScsiHbaDevice"` Targets []HostInternetScsiHbaSendTarget `xml:"targets"` + Force *bool `xml:"force"` } func init() { @@ -39505,6 +42384,24 @@ func init() { type RemoveNetworkResourcePoolResponse struct { } +type RemoveNvmeOverRdmaAdapter RemoveNvmeOverRdmaAdapterRequestType + +func init() { + t["RemoveNvmeOverRdmaAdapter"] = reflect.TypeOf((*RemoveNvmeOverRdmaAdapter)(nil)).Elem() +} + +type RemoveNvmeOverRdmaAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + HbaDeviceName string `xml:"hbaDeviceName"` +} + +func init() { + t["RemoveNvmeOverRdmaAdapterRequestType"] = reflect.TypeOf((*RemoveNvmeOverRdmaAdapterRequestType)(nil)).Elem() +} + +type RemoveNvmeOverRdmaAdapterResponse struct { +} + type RemovePerfInterval RemovePerfIntervalRequestType func init() { @@ -39634,6 +42531,24 @@ type RemoveSnapshot_TaskResponse struct { 
Returnval ManagedObjectReference `xml:"returnval"` } +type RemoveSoftwareAdapter RemoveSoftwareAdapterRequestType + +func init() { + t["RemoveSoftwareAdapter"] = reflect.TypeOf((*RemoveSoftwareAdapter)(nil)).Elem() +} + +type RemoveSoftwareAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + HbaDeviceName string `xml:"hbaDeviceName"` +} + +func init() { + t["RemoveSoftwareAdapterRequestType"] = reflect.TypeOf((*RemoveSoftwareAdapterRequestType)(nil)).Elem() +} + +type RemoveSoftwareAdapterResponse struct { +} + type RemoveUser RemoveUserRequestType func init() { @@ -39869,6 +42784,7 @@ type ReplicationConfigSpec struct { EncryptionDestination string `xml:"encryptionDestination,omitempty"` EncryptionPort int32 `xml:"encryptionPort,omitempty"` RemoteCertificateThumbprint string `xml:"remoteCertificateThumbprint,omitempty"` + DataSetsReplicationEnabled *bool `xml:"dataSetsReplicationEnabled"` Disk []ReplicationInfoDiskSettings `xml:"disk,omitempty"` } @@ -40005,10 +42921,10 @@ func init() { type ReplicationVmFault struct { ReplicationFault - Reason string `xml:"reason,omitempty"` - State string `xml:"state,omitempty"` - InstanceId string `xml:"instanceId,omitempty"` - Vm *ManagedObjectReference `xml:"vm,omitempty"` + Reason string `xml:"reason"` + State string `xml:"state,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + Vm ManagedObjectReference `xml:"vm"` } func init() { @@ -40430,11 +43346,12 @@ func init() { type ResourceConfigSpec struct { DynamicData - Entity *ManagedObjectReference `xml:"entity,omitempty"` - ChangeVersion string `xml:"changeVersion,omitempty"` - LastModified *time.Time `xml:"lastModified"` - CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` - MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + LastModified *time.Time `xml:"lastModified"` + CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` + MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty"` } func init() { @@ -40577,9 +43494,10 @@ func init() { type ResourcePoolRuntimeInfo struct { DynamicData - Memory ResourcePoolResourceUsage `xml:"memory"` - Cpu ResourcePoolResourceUsage `xml:"cpu"` - OverallStatus ManagedEntityStatus `xml:"overallStatus"` + Memory ResourcePoolResourceUsage `xml:"memory"` + Cpu ResourcePoolResourceUsage `xml:"cpu"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + SharesScalable string `xml:"sharesScalable,omitempty"` } func init() { @@ -40861,6 +43779,24 @@ type RetrieveDiskPartitionInfoResponse struct { Returnval []HostDiskPartitionInfo `xml:"returnval,omitempty"` } +type RetrieveDynamicPassthroughInfo RetrieveDynamicPassthroughInfoRequestType + +func init() { + t["RetrieveDynamicPassthroughInfo"] = reflect.TypeOf((*RetrieveDynamicPassthroughInfo)(nil)).Elem() +} + +type RetrieveDynamicPassthroughInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveDynamicPassthroughInfoRequestType"] = reflect.TypeOf((*RetrieveDynamicPassthroughInfoRequestType)(nil)).Elem() +} + +type RetrieveDynamicPassthroughInfoResponse struct { + Returnval []VirtualMachineDynamicPassthroughInfo `xml:"returnval,omitempty"` +} + type RetrieveEntityPermissions RetrieveEntityPermissionsRequestType func init() { @@ -40900,6 +43836,24 @@ type RetrieveEntityScheduledTaskResponse struct { Returnval 
[]ManagedObjectReference `xml:"returnval,omitempty"` } +type RetrieveFreeEpcMemory RetrieveFreeEpcMemoryRequestType + +func init() { + t["RetrieveFreeEpcMemory"] = reflect.TypeOf((*RetrieveFreeEpcMemory)(nil)).Elem() +} + +type RetrieveFreeEpcMemoryRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveFreeEpcMemoryRequestType"] = reflect.TypeOf((*RetrieveFreeEpcMemoryRequestType)(nil)).Elem() +} + +type RetrieveFreeEpcMemoryResponse struct { + Returnval int64 `xml:"returnval"` +} + type RetrieveHardwareUptime RetrieveHardwareUptimeRequestType func init() { @@ -41187,6 +44141,45 @@ type RetrieveServiceContentResponse struct { Returnval ServiceContent `xml:"returnval"` } +type RetrieveServiceProviderEntities RetrieveServiceProviderEntitiesRequestType + +func init() { + t["RetrieveServiceProviderEntities"] = reflect.TypeOf((*RetrieveServiceProviderEntities)(nil)).Elem() +} + +type RetrieveServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveServiceProviderEntitiesRequestType"] = reflect.TypeOf((*RetrieveServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type RetrieveServiceProviderEntitiesResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type RetrieveSnapshotDetails RetrieveSnapshotDetailsRequestType + +func init() { + t["RetrieveSnapshotDetails"] = reflect.TypeOf((*RetrieveSnapshotDetails)(nil)).Elem() +} + +type RetrieveSnapshotDetailsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["RetrieveSnapshotDetailsRequestType"] = reflect.TypeOf((*RetrieveSnapshotDetailsRequestType)(nil)).Elem() +} + +type RetrieveSnapshotDetailsResponse struct { + Returnval VStorageObjectSnapshotDetails `xml:"returnval"` +} + type RetrieveSnapshotInfo RetrieveSnapshotInfoRequestType func init() { @@ -41288,9 +44281,10 @@ type RetrieveVStorageObjectAssociationsResponse struct { } type RetrieveVStorageObjectRequestType struct { - This ManagedObjectReference `xml:"_this"` - Id ID `xml:"id"` - Datastore ManagedObjectReference `xml:"datastore"` + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + DiskInfoFlags []string `xml:"diskInfoFlags,omitempty"` } func init() { @@ -41321,6 +44315,60 @@ type RetrieveVStorageObjectStateResponse struct { Returnval VStorageObjectStateInfo `xml:"returnval"` } +type RetrieveVendorDeviceGroupInfo RetrieveVendorDeviceGroupInfoRequestType + +func init() { + t["RetrieveVendorDeviceGroupInfo"] = reflect.TypeOf((*RetrieveVendorDeviceGroupInfo)(nil)).Elem() +} + +type RetrieveVendorDeviceGroupInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveVendorDeviceGroupInfoRequestType"] = reflect.TypeOf((*RetrieveVendorDeviceGroupInfoRequestType)(nil)).Elem() +} + +type RetrieveVendorDeviceGroupInfoResponse struct { + Returnval []VirtualMachineVendorDeviceGroupInfo `xml:"returnval,omitempty"` +} + +type RetrieveVgpuDeviceInfo RetrieveVgpuDeviceInfoRequestType + +func init() { + t["RetrieveVgpuDeviceInfo"] = reflect.TypeOf((*RetrieveVgpuDeviceInfo)(nil)).Elem() +} + +type RetrieveVgpuDeviceInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveVgpuDeviceInfoRequestType"] = reflect.TypeOf((*RetrieveVgpuDeviceInfoRequestType)(nil)).Elem() +} + +type 
RetrieveVgpuDeviceInfoResponse struct { + Returnval []VirtualMachineVgpuDeviceInfo `xml:"returnval,omitempty"` +} + +type RetrieveVgpuProfileInfo RetrieveVgpuProfileInfoRequestType + +func init() { + t["RetrieveVgpuProfileInfo"] = reflect.TypeOf((*RetrieveVgpuProfileInfo)(nil)).Elem() +} + +type RetrieveVgpuProfileInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveVgpuProfileInfoRequestType"] = reflect.TypeOf((*RetrieveVgpuProfileInfoRequestType)(nil)).Elem() +} + +type RetrieveVgpuProfileInfoResponse struct { + Returnval []VirtualMachineVgpuProfileInfo `xml:"returnval,omitempty"` +} + type RevertToCurrentSnapshotRequestType struct { This ManagedObjectReference `xml:"_this"` Host *ManagedObjectReference `xml:"host,omitempty"` @@ -41861,25 +44909,27 @@ func init() { type ScsiLun struct { HostDevice - Key string `xml:"key,omitempty"` - Uuid string `xml:"uuid"` - Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"` - CanonicalName string `xml:"canonicalName,omitempty"` - DisplayName string `xml:"displayName,omitempty"` - LunType string `xml:"lunType"` - Vendor string `xml:"vendor,omitempty"` - Model string `xml:"model,omitempty"` - Revision string `xml:"revision,omitempty"` - ScsiLevel int32 `xml:"scsiLevel,omitempty"` - SerialNumber string `xml:"serialNumber,omitempty"` - DurableName *ScsiLunDurableName `xml:"durableName,omitempty"` - AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"` - StandardInquiry []byte `xml:"standardInquiry,omitempty"` - QueueDepth int32 `xml:"queueDepth,omitempty"` - OperationalState []string `xml:"operationalState"` - Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"` - VStorageSupport string `xml:"vStorageSupport,omitempty"` - ProtocolEndpoint *bool `xml:"protocolEndpoint"` + Key string `xml:"key,omitempty"` + Uuid string `xml:"uuid"` + Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"` + CanonicalName string `xml:"canonicalName,omitempty"` + DisplayName string `xml:"displayName,omitempty"` + LunType string `xml:"lunType"` + Vendor string `xml:"vendor,omitempty"` + Model string `xml:"model,omitempty"` + Revision string `xml:"revision,omitempty"` + ScsiLevel int32 `xml:"scsiLevel,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` + DurableName *ScsiLunDurableName `xml:"durableName,omitempty"` + AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"` + StandardInquiry []byte `xml:"standardInquiry,omitempty"` + QueueDepth int32 `xml:"queueDepth,omitempty"` + OperationalState []string `xml:"operationalState"` + Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` + ProtocolEndpoint *bool `xml:"protocolEndpoint"` + PerenniallyReserved *bool `xml:"perenniallyReserved"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported"` } func init() { @@ -42004,7 +45054,7 @@ func init() { type SecondaryVmAlreadyRegistered struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -42020,7 +45070,7 @@ func init() { type SecondaryVmNotRegistered struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -42244,6 +45294,7 @@ type ServiceContent struct { TaskManager *ManagedObjectReference `xml:"taskManager,omitempty"` ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty"` CustomizationSpecManager 
*ManagedObjectReference `xml:"customizationSpecManager,omitempty"` + GuestCustomizationManager *ManagedObjectReference `xml:"guestCustomizationManager,omitempty"` CustomFieldsManager *ManagedObjectReference `xml:"customFieldsManager,omitempty"` AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"` DiagnosticManager *ManagedObjectReference `xml:"diagnosticManager,omitempty"` @@ -42274,6 +45325,9 @@ type ServiceContent struct { HealthUpdateManager *ManagedObjectReference `xml:"healthUpdateManager,omitempty"` FailoverClusterConfigurator *ManagedObjectReference `xml:"failoverClusterConfigurator,omitempty"` FailoverClusterManager *ManagedObjectReference `xml:"failoverClusterManager,omitempty"` + TenantManager *ManagedObjectReference `xml:"tenantManager,omitempty"` + SiteInfoManager *ManagedObjectReference `xml:"siteInfoManager,omitempty"` + StorageQueryManager *ManagedObjectReference `xml:"storageQueryManager,omitempty"` } func init() { @@ -42376,9 +45430,11 @@ type SessionIsActiveResponse struct { type SessionManagerGenericServiceTicket struct { DynamicData - Id string `xml:"id"` - HostName string `xml:"hostName,omitempty"` - SslThumbprint string `xml:"sslThumbprint,omitempty"` + Id string `xml:"id"` + HostName string `xml:"hostName,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + CertThumbprintList []VirtualMachineCertThumbprint `xml:"certThumbprintList,omitempty"` + TicketType string `xml:"ticketType,omitempty"` } func init() { @@ -42454,6 +45510,43 @@ func init() { type SetCollectorPageSizeResponse struct { } +type SetCryptoMode SetCryptoModeRequestType + +func init() { + t["SetCryptoMode"] = reflect.TypeOf((*SetCryptoMode)(nil)).Elem() +} + +type SetCryptoModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + CryptoMode string `xml:"cryptoMode"` +} + +func init() { + t["SetCryptoModeRequestType"] = reflect.TypeOf((*SetCryptoModeRequestType)(nil)).Elem() +} + +type SetCryptoModeResponse struct { +} + +type SetDefaultKmsCluster SetDefaultKmsClusterRequestType + +func init() { + t["SetDefaultKmsCluster"] = reflect.TypeOf((*SetDefaultKmsCluster)(nil)).Elem() +} + +type SetDefaultKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ClusterId *KeyProviderId `xml:"clusterId,omitempty"` +} + +func init() { + t["SetDefaultKmsClusterRequestType"] = reflect.TypeOf((*SetDefaultKmsClusterRequestType)(nil)).Elem() +} + +type SetDefaultKmsClusterResponse struct { +} + type SetDisplayTopology SetDisplayTopologyRequestType func init() { @@ -42567,6 +45660,25 @@ func init() { type SetLocaleResponse struct { } +type SetMaxQueueDepth SetMaxQueueDepthRequestType + +func init() { + t["SetMaxQueueDepth"] = reflect.TypeOf((*SetMaxQueueDepth)(nil)).Elem() +} + +type SetMaxQueueDepthRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + MaxQdepth int64 `xml:"maxQdepth"` +} + +func init() { + t["SetMaxQueueDepthRequestType"] = reflect.TypeOf((*SetMaxQueueDepthRequestType)(nil)).Elem() +} + +type SetMaxQueueDepthResponse struct { +} + type SetMultipathLunPolicy SetMultipathLunPolicyRequestType func init() { @@ -42870,6 +45982,14 @@ func init() { t["SingleMac"] = reflect.TypeOf((*SingleMac)(nil)).Elem() } +type SiteInfo struct { + DynamicData +} + +func init() { + t["SiteInfo"] = reflect.TypeOf((*SiteInfo)(nil)).Elem() +} + type SnapshotCloneNotSupported struct { SnapshotCopyNotSupported } @@ -43102,6 +46222,20 @@ func 
init() { t["SoftwarePackageCapability"] = reflect.TypeOf((*SoftwarePackageCapability)(nil)).Elem() } +type SolutionUserRequired struct { + SecurityError +} + +func init() { + t["SolutionUserRequired"] = reflect.TypeOf((*SolutionUserRequired)(nil)).Elem() +} + +type SolutionUserRequiredFault SolutionUserRequired + +func init() { + t["SolutionUserRequiredFault"] = reflect.TypeOf((*SolutionUserRequiredFault)(nil)).Elem() +} + type SourceNodeSpec struct { DynamicData @@ -43186,6 +46320,26 @@ func init() { type StandbyGuestResponse struct { } +type StartGuestNetworkRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["StartGuestNetworkRequestType"] = reflect.TypeOf((*StartGuestNetworkRequestType)(nil)).Elem() +} + +type StartGuestNetwork_Task StartGuestNetworkRequestType + +func init() { + t["StartGuestNetwork_Task"] = reflect.TypeOf((*StartGuestNetwork_Task)(nil)).Elem() +} + +type StartGuestNetwork_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type StartProgramInGuest StartProgramInGuestRequestType func init() { @@ -44538,6 +47692,24 @@ func init() { type TerminateVMResponse struct { } +type TestTimeService TestTimeServiceRequestType + +func init() { + t["TestTimeService"] = reflect.TypeOf((*TestTimeService)(nil)).Elem() +} + +type TestTimeServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TestTimeServiceRequestType"] = reflect.TypeOf((*TestTimeServiceRequestType)(nil)).Elem() +} + +type TestTimeServiceResponse struct { + Returnval *HostDateTimeSystemServiceTestResult `xml:"returnval,omitempty"` +} + type ThirdPartyLicenseAssignmentFailed struct { RuntimeFault @@ -44748,18 +47920,19 @@ func init() { type ToolsConfigInfo struct { DynamicData - ToolsVersion int32 `xml:"toolsVersion,omitempty"` - ToolsInstallType string `xml:"toolsInstallType,omitempty"` - AfterPowerOn *bool `xml:"afterPowerOn"` - AfterResume *bool `xml:"afterResume"` - BeforeGuestStandby *bool `xml:"beforeGuestStandby"` - BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` - BeforeGuestReboot *bool `xml:"beforeGuestReboot"` - ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` - PendingCustomization string `xml:"pendingCustomization,omitempty"` - CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty"` - SyncTimeWithHost *bool `xml:"syncTimeWithHost"` - LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` + ToolsVersion int32 `xml:"toolsVersion,omitempty"` + ToolsInstallType string `xml:"toolsInstallType,omitempty"` + AfterPowerOn *bool `xml:"afterPowerOn"` + AfterResume *bool `xml:"afterResume"` + BeforeGuestStandby *bool `xml:"beforeGuestStandby"` + BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` + BeforeGuestReboot *bool `xml:"beforeGuestReboot"` + ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` + PendingCustomization string `xml:"pendingCustomization,omitempty"` + CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty"` + SyncTimeWithHostAllowed *bool `xml:"syncTimeWithHostAllowed"` + SyncTimeWithHost *bool `xml:"syncTimeWithHost"` + LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` } func init() { @@ -45169,6 +48342,24 @@ type UnmapVmfsVolumeEx_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UnmarkServiceProviderEntities 
UnmarkServiceProviderEntitiesRequestType + +func init() { + t["UnmarkServiceProviderEntities"] = reflect.TypeOf((*UnmarkServiceProviderEntities)(nil)).Elem() +} + +type UnmarkServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["UnmarkServiceProviderEntitiesRequestType"] = reflect.TypeOf((*UnmarkServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type UnmarkServiceProviderEntitiesResponse struct { +} + type UnmountDiskMappingRequestType struct { This ManagedObjectReference `xml:"_this"` Mapping []VsanHostDiskMapping `xml:"mapping"` @@ -45348,6 +48539,24 @@ func init() { type UnregisterHealthUpdateProviderResponse struct { } +type UnregisterKmsCluster UnregisterKmsClusterRequestType + +func init() { + t["UnregisterKmsCluster"] = reflect.TypeOf((*UnregisterKmsCluster)(nil)).Elem() +} + +type UnregisterKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` +} + +func init() { + t["UnregisterKmsClusterRequestType"] = reflect.TypeOf((*UnregisterKmsClusterRequestType)(nil)).Elem() +} + +type UnregisterKmsClusterResponse struct { +} + type UnregisterVM UnregisterVMRequestType func init() { @@ -45475,6 +48684,24 @@ type UpdateAnswerFile_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UpdateAssignableHardwareConfig UpdateAssignableHardwareConfigRequestType + +func init() { + t["UpdateAssignableHardwareConfig"] = reflect.TypeOf((*UpdateAssignableHardwareConfig)(nil)).Elem() +} + +type UpdateAssignableHardwareConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostAssignableHardwareConfig `xml:"config"` +} + +func init() { + t["UpdateAssignableHardwareConfigRequestType"] = reflect.TypeOf((*UpdateAssignableHardwareConfigRequestType)(nil)).Elem() +} + +type UpdateAssignableHardwareConfigResponse struct { +} + type UpdateAssignedLicense UpdateAssignedLicenseRequestType func init() { @@ -45919,6 +49146,25 @@ func init() { type UpdateHostSubSpecificationResponse struct { } +type UpdateHppMultipathLunPolicy UpdateHppMultipathLunPolicyRequestType + +func init() { + t["UpdateHppMultipathLunPolicy"] = reflect.TypeOf((*UpdateHppMultipathLunPolicy)(nil)).Elem() +} + +type UpdateHppMultipathLunPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunId string `xml:"lunId"` + Policy HostMultipathInfoHppLogicalUnitPolicy `xml:"policy"` +} + +func init() { + t["UpdateHppMultipathLunPolicyRequestType"] = reflect.TypeOf((*UpdateHppMultipathLunPolicyRequestType)(nil)).Elem() +} + +type UpdateHppMultipathLunPolicyResponse struct { +} + type UpdateInternetScsiAdvancedOptions UpdateInternetScsiAdvancedOptionsRequestType func init() { @@ -46427,6 +49673,25 @@ func init() { type UpdatePortGroupResponse struct { } +type UpdateProductLockerLocationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` +} + +func init() { + t["UpdateProductLockerLocationRequestType"] = reflect.TypeOf((*UpdateProductLockerLocationRequestType)(nil)).Elem() +} + +type UpdateProductLockerLocation_Task UpdateProductLockerLocationRequestType + +func init() { + t["UpdateProductLockerLocation_Task"] = reflect.TypeOf((*UpdateProductLockerLocation_Task)(nil)).Elem() +} + +type UpdateProductLockerLocation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type UpdateProgress UpdateProgressRequestType func init() { @@ -46715,6 
+49980,28 @@ type UpdateVStorageInfrastructureObjectPolicy_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UpdateVStorageObjectCryptoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + DisksCrypto *DiskCryptoSpec `xml:"disksCrypto,omitempty"` +} + +func init() { + t["UpdateVStorageObjectCryptoRequestType"] = reflect.TypeOf((*UpdateVStorageObjectCryptoRequestType)(nil)).Elem() +} + +type UpdateVStorageObjectCrypto_Task UpdateVStorageObjectCryptoRequestType + +func init() { + t["UpdateVStorageObjectCrypto_Task"] = reflect.TypeOf((*UpdateVStorageObjectCrypto_Task)(nil)).Elem() +} + +type UpdateVStorageObjectCrypto_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type UpdateVStorageObjectPolicyRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -47291,16 +50578,52 @@ func init() { type VASAStorageArray struct { DynamicData - Name string `xml:"name"` - Uuid string `xml:"uuid"` - VendorId string `xml:"vendorId"` - ModelId string `xml:"modelId"` + Name string `xml:"name"` + Uuid string `xml:"uuid"` + VendorId string `xml:"vendorId"` + ModelId string `xml:"modelId"` + DiscoverySvcInfo []VASAStorageArrayDiscoverySvcInfo `xml:"discoverySvcInfo,omitempty"` } func init() { t["VASAStorageArray"] = reflect.TypeOf((*VASAStorageArray)(nil)).Elem() } +type VASAStorageArrayDiscoveryFcTransport struct { + DynamicData + + NodeWwn string `xml:"nodeWwn"` + PortWwn string `xml:"portWwn"` +} + +func init() { + t["VASAStorageArrayDiscoveryFcTransport"] = reflect.TypeOf((*VASAStorageArrayDiscoveryFcTransport)(nil)).Elem() +} + +type VASAStorageArrayDiscoveryIpTransport struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + PortNumber string `xml:"portNumber,omitempty"` +} + +func init() { + t["VASAStorageArrayDiscoveryIpTransport"] = reflect.TypeOf((*VASAStorageArrayDiscoveryIpTransport)(nil)).Elem() +} + +type VASAStorageArrayDiscoverySvcInfo struct { + DynamicData + + PortType string `xml:"portType"` + SvcNqn string `xml:"svcNqn"` + IpInfo *VASAStorageArrayDiscoveryIpTransport `xml:"ipInfo,omitempty"` + FcInfo *VASAStorageArrayDiscoveryFcTransport `xml:"fcInfo,omitempty"` +} + +func init() { + t["VASAStorageArrayDiscoverySvcInfo"] = reflect.TypeOf((*VASAStorageArrayDiscoverySvcInfo)(nil)).Elem() +} + type VAppCloneSpec struct { DynamicData @@ -47559,6 +50882,28 @@ func init() { t["VAppTaskInProgressFault"] = reflect.TypeOf((*VAppTaskInProgressFault)(nil)).Elem() } +type VCenterUpdateVStorageObjectMetadataExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata []KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["VCenterUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*VCenterUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() +} + +type VCenterUpdateVStorageObjectMetadataEx_Task VCenterUpdateVStorageObjectMetadataExRequestType + +func init() { + t["VCenterUpdateVStorageObjectMetadataEx_Task"] = reflect.TypeOf((*VCenterUpdateVStorageObjectMetadataEx_Task)(nil)).Elem() +} + +type VCenterUpdateVStorageObjectMetadataEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type VFlashCacheHotConfigNotSupported struct { VmConfigFault } @@ -47823,6 +51168,7 @@ 
type VMwareDVSConfigInfo struct { LacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig,omitempty"` LacpApiVersion string `xml:"lacpApiVersion,omitempty"` MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"` + NetworkOffloadSpecId string `xml:"networkOffloadSpecId,omitempty"` } func init() { @@ -47839,6 +51185,7 @@ type VMwareDVSConfigSpec struct { IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty"` LacpApiVersion string `xml:"lacpApiVersion,omitempty"` MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"` + NetworkOffloadSpecId string `xml:"networkOffloadSpecId,omitempty"` } func init() { @@ -47855,6 +51202,9 @@ type VMwareDVSFeatureCapability struct { MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported"` VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty"` LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty"` + DpuCapability *VMwareDvsDpuCapability `xml:"dpuCapability,omitempty"` + NsxSupported *bool `xml:"nsxSupported"` + MtuCapability *VMwareDvsMtuCapability `xml:"mtuCapability,omitempty"` } func init() { @@ -47903,6 +51253,7 @@ type VMwareDVSPortSetting struct { TxUplink *BoolPolicy `xml:"txUplink,omitempty"` LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty"` MacManagementPolicy *DVSMacManagementPolicy `xml:"macManagementPolicy,omitempty"` + VNI *IntPolicy `xml:"VNI,omitempty"` } func init() { @@ -47916,6 +51267,7 @@ type VMwareDVSPortgroupPolicy struct { UplinkTeamingOverrideAllowed bool `xml:"uplinkTeamingOverrideAllowed"` SecurityPolicyOverrideAllowed bool `xml:"securityPolicyOverrideAllowed"` IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed"` + MacManagementOverrideAllowed *bool `xml:"macManagementOverrideAllowed"` } func init() { @@ -48009,6 +51361,16 @@ func init() { t["VMwareDVSVspanConfigSpec"] = reflect.TypeOf((*VMwareDVSVspanConfigSpec)(nil)).Elem() } +type VMwareDvsDpuCapability struct { + DynamicData + + NetworkOffloadSupported *bool `xml:"networkOffloadSupported"` +} + +func init() { + t["VMwareDvsDpuCapability"] = reflect.TypeOf((*VMwareDvsDpuCapability)(nil)).Elem() +} + type VMwareDvsIpfixCapability struct { DynamicData @@ -48026,6 +51388,7 @@ type VMwareDvsLacpCapability struct { LacpSupported *bool `xml:"lacpSupported"` MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported"` + LacpFastModeSupported *bool `xml:"lacpFastModeSupported"` } func init() { @@ -48044,6 +51407,7 @@ type VMwareDvsLacpGroupConfig struct { Ipfix *VMwareDvsLagIpfixConfig `xml:"ipfix,omitempty"` UplinkName []string `xml:"uplinkName,omitempty"` UplinkPortKey []string `xml:"uplinkPortKey,omitempty"` + TimeoutMode string `xml:"timeoutMode,omitempty"` } func init() { @@ -48081,6 +51445,17 @@ func init() { t["VMwareDvsLagVlanConfig"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem() } +type VMwareDvsMtuCapability struct { + DynamicData + + MinMtuSupported int32 `xml:"minMtuSupported"` + MaxMtuSupported int32 `xml:"maxMtuSupported"` +} + +func init() { + t["VMwareDvsMtuCapability"] = reflect.TypeOf((*VMwareDvsMtuCapability)(nil)).Elem() +} + type VMwareIpfixConfig struct { DynamicData @@ -48226,6 +51601,17 @@ type VStorageObjectCreateSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type VStorageObjectSnapshotDetails struct { + DynamicData + + Path string `xml:"path,omitempty"` + ChangedBlockTrackingId string `xml:"changedBlockTrackingId,omitempty"` +} + +func init() { + t["VStorageObjectSnapshotDetails"] = 
reflect.TypeOf((*VStorageObjectSnapshotDetails)(nil)).Elem() +} + type VStorageObjectSnapshotInfo struct { DynamicData @@ -48285,6 +51671,7 @@ type VVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct { DynamicData TargetConfigVVolId string `xml:"targetConfigVVolId"` + DsPath string `xml:"dsPath,omitempty"` Fault LocalizedMethodFault `xml:"fault"` } @@ -48311,6 +51698,26 @@ func init() { type ValidateCredentialsInGuestResponse struct { } +type ValidateHCIConfiguration ValidateHCIConfigurationRequestType + +func init() { + t["ValidateHCIConfiguration"] = reflect.TypeOf((*ValidateHCIConfiguration)(nil)).Elem() +} + +type ValidateHCIConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + HciConfigSpec *ClusterComputeResourceHCIConfigSpec `xml:"hciConfigSpec,omitempty"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` +} + +func init() { + t["ValidateHCIConfigurationRequestType"] = reflect.TypeOf((*ValidateHCIConfigurationRequestType)(nil)).Elem() +} + +type ValidateHCIConfigurationResponse struct { + Returnval []BaseClusterComputeResourceValidationResultBase `xml:"returnval,omitempty,typeattr"` +} + type ValidateHost ValidateHostRequestType func init() { @@ -48789,13 +52196,15 @@ func init() { type VirtualDevice struct { DynamicData - Key int32 `xml:"key"` - DeviceInfo BaseDescription `xml:"deviceInfo,omitempty,typeattr"` - Backing BaseVirtualDeviceBackingInfo `xml:"backing,omitempty,typeattr"` - Connectable *VirtualDeviceConnectInfo `xml:"connectable,omitempty"` - SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr"` - ControllerKey int32 `xml:"controllerKey,omitempty"` - UnitNumber *int32 `xml:"unitNumber"` + Key int32 `xml:"key"` + DeviceInfo BaseDescription `xml:"deviceInfo,omitempty,typeattr"` + Backing BaseVirtualDeviceBackingInfo `xml:"backing,omitempty,typeattr"` + Connectable *VirtualDeviceConnectInfo `xml:"connectable,omitempty"` + SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr"` + ControllerKey int32 `xml:"controllerKey,omitempty"` + UnitNumber *int32 `xml:"unitNumber"` + NumaNode int32 `xml:"numaNode,omitempty"` + DeviceGroupInfo *VirtualDeviceDeviceGroupInfo `xml:"deviceGroupInfo,omitempty"` } func init() { @@ -48841,11 +52250,13 @@ func init() { type VirtualDeviceConfigSpec struct { DynamicData - Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"` - FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"` - Device BaseVirtualDevice `xml:"device,typeattr"` - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` - Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty"` + Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"` + FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"` + Device BaseVirtualDevice `xml:"device,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty"` + FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr"` + ChangeMode string `xml:"changeMode,omitempty"` } func init() { @@ -48909,6 +52320,17 @@ func init() { t["VirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem() } +type VirtualDeviceDeviceGroupInfo struct { + DynamicData + + GroupInstanceKey int32 `xml:"groupInstanceKey"` + SequenceId int32 `xml:"sequenceId"` +} + +func init() { + t["VirtualDeviceDeviceGroupInfo"] 
= reflect.TypeOf((*VirtualDeviceDeviceGroupInfo)(nil)).Elem() +} + type VirtualDeviceFileBackingInfo struct { VirtualDeviceBackingInfo @@ -48945,6 +52367,7 @@ type VirtualDeviceOption struct { Deprecated bool `xml:"deprecated"` PlugAndPlay bool `xml:"plugAndPlay"` HotRemoveSupported *bool `xml:"hotRemoveSupported"` + NumaSupported *bool `xml:"numaSupported"` } func init() { @@ -49025,15 +52448,16 @@ func init() { type VirtualDisk struct { VirtualDevice - CapacityInKB int64 `xml:"capacityInKB"` - CapacityInBytes int64 `xml:"capacityInBytes,omitempty"` - Shares *SharesInfo `xml:"shares,omitempty"` - StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"` - DiskObjectId string `xml:"diskObjectId,omitempty"` - VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` - Iofilter []string `xml:"iofilter,omitempty"` - VDiskId *ID `xml:"vDiskId,omitempty"` - NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone"` + CapacityInKB int64 `xml:"capacityInKB"` + CapacityInBytes int64 `xml:"capacityInBytes,omitempty"` + Shares *SharesInfo `xml:"shares,omitempty"` + StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"` + DiskObjectId string `xml:"diskObjectId,omitempty"` + VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` + Iofilter []string `xml:"iofilter,omitempty"` + VDiskId *ID `xml:"vDiskId,omitempty"` + NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone"` + IndependentFilters []BaseVirtualMachineBaseIndependentFilterSpec `xml:"independentFilters,omitempty,typeattr"` } func init() { @@ -49684,10 +53108,13 @@ type VirtualHardware struct { NumCPU int32 `xml:"numCPU"` NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` + AutoCoresPerSocket *bool `xml:"autoCoresPerSocket"` MemoryMB int32 `xml:"memoryMB"` VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` VirtualSMCPresent *bool `xml:"virtualSMCPresent"` Device []BaseVirtualDevice `xml:"device,omitempty,typeattr"` + MotherboardLayout string `xml:"motherboardLayout,omitempty"` + SimultaneousThreads int32 `xml:"simultaneousThreads,omitempty"` } func init() { @@ -49711,25 +53138,34 @@ func init() { type VirtualHardwareOption struct { DynamicData - HwVersion int32 `xml:"hwVersion"` - VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` - DeviceListReadonly bool `xml:"deviceListReadonly"` - NumCPU []int32 `xml:"numCPU"` - NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` - NumCpuReadonly bool `xml:"numCpuReadonly"` - MemoryMB LongOption `xml:"memoryMB"` - NumPCIControllers IntOption `xml:"numPCIControllers"` - NumIDEControllers IntOption `xml:"numIDEControllers"` - NumUSBControllers IntOption `xml:"numUSBControllers"` - NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` - NumSIOControllers IntOption `xml:"numSIOControllers"` - NumPS2Controllers IntOption `xml:"numPS2Controllers"` - LicensingLimit []string `xml:"licensingLimit,omitempty"` - NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` - NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` - ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` - NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty"` - NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty"` + HwVersion int32 `xml:"hwVersion"` + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` + 
DeviceListReadonly bool `xml:"deviceListReadonly"` + NumCPU []int32 `xml:"numCPU"` + NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` + AutoCoresPerSocket *BoolOption `xml:"autoCoresPerSocket,omitempty"` + NumCpuReadonly bool `xml:"numCpuReadonly"` + MemoryMB LongOption `xml:"memoryMB"` + NumPCIControllers IntOption `xml:"numPCIControllers"` + NumIDEControllers IntOption `xml:"numIDEControllers"` + NumUSBControllers IntOption `xml:"numUSBControllers"` + NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` + NumSIOControllers IntOption `xml:"numSIOControllers"` + NumPS2Controllers IntOption `xml:"numPS2Controllers"` + LicensingLimit []string `xml:"licensingLimit,omitempty"` + NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` + NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` + ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` + NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty"` + NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty"` + NumWDTDevices *IntOption `xml:"numWDTDevices,omitempty"` + NumPrecisionClockDevices *IntOption `xml:"numPrecisionClockDevices,omitempty"` + EpcMemoryMB *LongOption `xml:"epcMemoryMB,omitempty"` + AcpiHostBridgesFirmware []string `xml:"acpiHostBridgesFirmware,omitempty"` + NumCpuSimultaneousThreads *IntOption `xml:"numCpuSimultaneousThreads,omitempty"` + NumNumaNodes *IntOption `xml:"numNumaNodes,omitempty"` + NumDeviceGroups *IntOption `xml:"numDeviceGroups,omitempty"` + DeviceGroupTypes []string `xml:"deviceGroupTypes,omitempty"` } func init() { @@ -49846,6 +53282,14 @@ func init() { t["VirtualMachineAffinityInfo"] = reflect.TypeOf((*VirtualMachineAffinityInfo)(nil)).Elem() } +type VirtualMachineBaseIndependentFilterSpec struct { + DynamicData +} + +func init() { + t["VirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineBootOptions struct { DynamicData @@ -49951,6 +53395,12 @@ type VirtualMachineCapability struct { VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored"` VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored"` DiskOnlySnapshotOnSuspendedVMSupported *bool `xml:"diskOnlySnapshotOnSuspendedVMSupported"` + SuspendToMemorySupported *bool `xml:"suspendToMemorySupported"` + ToolsSyncTimeAllowSupported *bool `xml:"toolsSyncTimeAllowSupported"` + SevSupported *bool `xml:"sevSupported"` + PmemFailoverSupported *bool `xml:"pmemFailoverSupported"` + RequireSgxAttestationSupported *bool `xml:"requireSgxAttestationSupported"` + ChangeModeDisksSupported *bool `xml:"changeModeDisksSupported"` } func init() { @@ -49967,16 +53417,28 @@ func init() { t["VirtualMachineCdromInfo"] = reflect.TypeOf((*VirtualMachineCdromInfo)(nil)).Elem() } +type VirtualMachineCertThumbprint struct { + DynamicData + + Thumbprint string `xml:"thumbprint"` + HashAlgorithm string `xml:"hashAlgorithm,omitempty"` +} + +func init() { + t["VirtualMachineCertThumbprint"] = reflect.TypeOf((*VirtualMachineCertThumbprint)(nil)).Elem() +} + type VirtualMachineCloneSpec struct { DynamicData - Location VirtualMachineRelocateSpec `xml:"location"` - Template bool `xml:"template"` - Config *VirtualMachineConfigSpec `xml:"config,omitempty"` - Customization *CustomizationSpec `xml:"customization,omitempty"` - PowerOn bool `xml:"powerOn"` - Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` - Memory *bool `xml:"memory"` + Location VirtualMachineRelocateSpec 
`xml:"location"` + Template bool `xml:"template"` + Config *VirtualMachineConfigSpec `xml:"config,omitempty"` + Customization *CustomizationSpec `xml:"customization,omitempty"` + PowerOn bool `xml:"powerOn"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` + Memory *bool `xml:"memory"` + TpmProvisionPolicy string `xml:"tpmProvisionPolicy,omitempty"` } func init() { @@ -50011,7 +53473,9 @@ type VirtualMachineConfigInfo struct { Flags VirtualMachineFlagInfo `xml:"flags"` ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` DefaultPowerOps VirtualMachineDefaultPowerOpInfo `xml:"defaultPowerOps"` + RebootPowerOff *bool `xml:"rebootPowerOff"` Hardware VirtualHardware `xml:"hardware"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty"` CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` @@ -50051,6 +53515,19 @@ type VirtualMachineConfigInfo struct { KeyId *CryptoKeyId `xml:"keyId,omitempty"` GuestIntegrityInfo *VirtualMachineGuestIntegrityInfo `xml:"guestIntegrityInfo,omitempty"` MigrateEncryption string `xml:"migrateEncryption,omitempty"` + SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty"` + ContentLibItemInfo *VirtualMachineContentLibraryItemInfo `xml:"contentLibItemInfo,omitempty"` + FtEncryptionMode string `xml:"ftEncryptionMode,omitempty"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty"` + SevEnabled *bool `xml:"sevEnabled"` + NumaInfo *VirtualMachineVirtualNumaInfo `xml:"numaInfo,omitempty"` + PmemFailoverEnabled *bool `xml:"pmemFailoverEnabled"` + VmxStatsCollectionEnabled *bool `xml:"vmxStatsCollectionEnabled"` + VmOpNotificationToAppEnabled *bool `xml:"vmOpNotificationToAppEnabled"` + VmOpNotificationTimeout int64 `xml:"vmOpNotificationTimeout,omitempty"` + DeviceSwap *VirtualMachineVirtualDeviceSwap `xml:"deviceSwap,omitempty"` + Pmem *VirtualMachineVirtualPMem `xml:"pmem,omitempty"` + DeviceGroups *VirtualMachineVirtualDeviceGroups `xml:"deviceGroups,omitempty"` } func init() { @@ -50119,66 +53596,82 @@ func init() { type VirtualMachineConfigSpec struct { DynamicData - ChangeVersion string `xml:"changeVersion,omitempty"` - Name string `xml:"name,omitempty"` - Version string `xml:"version,omitempty"` - CreateDate *time.Time `xml:"createDate"` - Uuid string `xml:"uuid,omitempty"` - InstanceUuid string `xml:"instanceUuid,omitempty"` - NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` - NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` - NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` - NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` - NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` - NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` - NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` - NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` - LocationId string `xml:"locationId,omitempty"` - GuestId string `xml:"guestId,omitempty"` - AlternateGuestName string `xml:"alternateGuestName,omitempty"` - Annotation string `xml:"annotation,omitempty"` - Files *VirtualMachineFileInfo `xml:"files,omitempty"` - Tools *ToolsConfigInfo `xml:"tools,omitempty"` - Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` - ConsolePreferences *VirtualMachineConsolePreferences 
`xml:"consolePreferences,omitempty"` - PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` - NumCPUs int32 `xml:"numCPUs,omitempty"` - NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` - MemoryMB int64 `xml:"memoryMB,omitempty"` - MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` - CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` - CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` - VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` - VirtualSMCPresent *bool `xml:"virtualSMCPresent"` - DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` - CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` - MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` - LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` - CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` - MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` - NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` - CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` - ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` - SwapPlacement string `xml:"swapPlacement,omitempty"` - BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` - VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` - FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` - RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` - VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` - VAssertsEnabled *bool `xml:"vAssertsEnabled"` - ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` - Firmware string `xml:"firmware,omitempty"` - MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` - GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` - MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` - NestedHVEnabled *bool `xml:"nestedHVEnabled"` - VPMCEnabled *bool `xml:"vPMCEnabled"` - ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` - VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` - MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` - MigrateEncryption string `xml:"migrateEncryption,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + Name string `xml:"name,omitempty"` + Version string `xml:"version,omitempty"` + CreateDate *time.Time `xml:"createDate"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` + LocationId string `xml:"locationId,omitempty"` + GuestId string `xml:"guestId,omitempty"` + AlternateGuestName string `xml:"alternateGuestName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Files *VirtualMachineFileInfo `xml:"files,omitempty"` + Tools *ToolsConfigInfo 
`xml:"tools,omitempty"` + Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` + ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` + RebootPowerOff *bool `xml:"rebootPowerOff"` + NumCPUs int32 `xml:"numCPUs,omitempty"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` + MemoryMB int64 `xml:"memoryMB,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + MigrateEncryption string `xml:"migrateEncryption,omitempty"` + SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty"` + FtEncryptionMode string `xml:"ftEncryptionMode,omitempty"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty"` + SevEnabled *bool `xml:"sevEnabled"` + VirtualNuma *VirtualMachineVirtualNuma `xml:"virtualNuma,omitempty"` + MotherboardLayout string `xml:"motherboardLayout,omitempty"` + PmemFailoverEnabled *bool `xml:"pmemFailoverEnabled"` + VmxStatsCollectionEnabled *bool `xml:"vmxStatsCollectionEnabled"` + VmOpNotificationToAppEnabled *bool `xml:"vmOpNotificationToAppEnabled"` + VmOpNotificationTimeout int64 `xml:"vmOpNotificationTimeout,omitempty"` + DeviceSwap *VirtualMachineVirtualDeviceSwap `xml:"deviceSwap,omitempty"` + SimultaneousThreads int32 `xml:"simultaneousThreads,omitempty"` + Pmem 
*VirtualMachineVirtualPMem `xml:"pmem,omitempty"` + DeviceGroups *VirtualMachineVirtualDeviceGroups `xml:"deviceGroups,omitempty"` } func init() { @@ -50208,12 +53701,25 @@ type VirtualMachineConfigSummary struct { ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` TpmPresent *bool `xml:"tpmPresent"` NumVmiopBackings int32 `xml:"numVmiopBackings,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` } func init() { t["VirtualMachineConfigSummary"] = reflect.TypeOf((*VirtualMachineConfigSummary)(nil)).Elem() } +type VirtualMachineConnection struct { + DynamicData + + Label string `xml:"label"` + Client string `xml:"client"` + UserName string `xml:"userName"` +} + +func init() { + t["VirtualMachineConnection"] = reflect.TypeOf((*VirtualMachineConnection)(nil)).Elem() +} + type VirtualMachineConsolePreferences struct { DynamicData @@ -50226,6 +53732,17 @@ func init() { t["VirtualMachineConsolePreferences"] = reflect.TypeOf((*VirtualMachineConsolePreferences)(nil)).Elem() } +type VirtualMachineContentLibraryItemInfo struct { + DynamicData + + ContentLibraryItemUuid string `xml:"contentLibraryItemUuid"` + ContentLibraryItemVersion string `xml:"contentLibraryItemVersion,omitempty"` +} + +func init() { + t["VirtualMachineContentLibraryItemInfo"] = reflect.TypeOf((*VirtualMachineContentLibraryItemInfo)(nil)).Elem() +} + type VirtualMachineCpuIdInfoSpec struct { ArrayUpdateSpec @@ -50327,6 +53844,9 @@ type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct { VmDirectPathGen2InactiveReasonVm []string `xml:"vmDirectPathGen2InactiveReasonVm,omitempty"` VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty"` VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty"` + Uptv2Active *bool `xml:"uptv2Active"` + Uptv2InactiveReasonVm []string `xml:"uptv2InactiveReasonVm,omitempty"` + Uptv2InactiveReasonOther []string `xml:"uptv2InactiveReasonOther,omitempty"` ReservationStatus string `xml:"reservationStatus,omitempty"` AttachmentStatus string `xml:"attachmentStatus,omitempty"` FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` @@ -50360,6 +53880,41 @@ func init() { t["VirtualMachineDisplayTopology"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem() } +type VirtualMachineDvxClassInfo struct { + DynamicData + + DeviceClass BaseElementDescription `xml:"deviceClass,typeattr"` + VendorName string `xml:"vendorName"` + SriovNic bool `xml:"sriovNic"` + ConfigParams []OptionDef `xml:"configParams,omitempty"` +} + +func init() { + t["VirtualMachineDvxClassInfo"] = reflect.TypeOf((*VirtualMachineDvxClassInfo)(nil)).Elem() +} + +type VirtualMachineDynamicPassthroughInfo struct { + VirtualMachineTargetInfo + + VendorName string `xml:"vendorName"` + DeviceName string `xml:"deviceName"` + CustomLabel string `xml:"customLabel,omitempty"` + VendorId int32 `xml:"vendorId"` + DeviceId int32 `xml:"deviceId"` +} + +func init() { + t["VirtualMachineDynamicPassthroughInfo"] = reflect.TypeOf((*VirtualMachineDynamicPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineEmptyIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec +} + +func init() { + t["VirtualMachineEmptyIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineEmptyIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineEmptyProfileSpec struct { VirtualMachineProfileSpec } @@ -50550,6 +54105,17 @@ func init() { t["VirtualMachineGuestIntegrityInfo"] = 
reflect.TypeOf((*VirtualMachineGuestIntegrityInfo)(nil)).Elem() } +type VirtualMachineGuestMonitoringModeInfo struct { + DynamicData + + GmmFile string `xml:"gmmFile,omitempty"` + GmmAppliance string `xml:"gmmAppliance,omitempty"` +} + +func init() { + t["VirtualMachineGuestMonitoringModeInfo"] = reflect.TypeOf((*VirtualMachineGuestMonitoringModeInfo)(nil)).Elem() +} + type VirtualMachineGuestQuiesceSpec struct { DynamicData @@ -50571,6 +54137,7 @@ type VirtualMachineGuestSummary struct { ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` HostName string `xml:"hostName,omitempty"` IpAddress string `xml:"ipAddress,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` } func init() { @@ -50609,6 +54176,18 @@ func init() { t["VirtualMachineImportSpec"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem() } +type VirtualMachineIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec + + FilterName string `xml:"filterName"` + FilterClass string `xml:"filterClass,omitempty"` + FilterCapabilities []KeyValue `xml:"filterCapabilities,omitempty"` +} + +func init() { + t["VirtualMachineIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineInstantCloneSpec struct { DynamicData @@ -50711,6 +54290,14 @@ func init() { t["VirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() } +type VirtualMachineMksConnection struct { + VirtualMachineConnection +} + +func init() { + t["VirtualMachineMksConnection"] = reflect.TypeOf((*VirtualMachineMksConnection)(nil)).Elem() +} + type VirtualMachineMksTicket struct { DynamicData @@ -50778,6 +54365,16 @@ func init() { t["VirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() } +type VirtualMachinePrecisionClockInfo struct { + VirtualMachineTargetInfo + + SystemClockProtocol string `xml:"systemClockProtocol,omitempty"` +} + +func init() { + t["VirtualMachinePrecisionClockInfo"] = reflect.TypeOf((*VirtualMachinePrecisionClockInfo)(nil)).Elem() +} + type VirtualMachineProfileDetails struct { DynamicData @@ -50846,32 +54443,47 @@ func init() { type VirtualMachineQuickStats struct { DynamicData - OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` - OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty"` - GuestMemoryUsage int32 `xml:"guestMemoryUsage,omitempty"` - HostMemoryUsage int32 `xml:"hostMemoryUsage,omitempty"` - GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` - DistributedCpuEntitlement int32 `xml:"distributedCpuEntitlement,omitempty"` - DistributedMemoryEntitlement int32 `xml:"distributedMemoryEntitlement,omitempty"` - StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` - StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` - PrivateMemory int32 `xml:"privateMemory,omitempty"` - SharedMemory int32 `xml:"sharedMemory,omitempty"` - SwappedMemory int32 `xml:"swappedMemory,omitempty"` - BalloonedMemory int32 `xml:"balloonedMemory,omitempty"` - ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty"` - FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty"` - FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty"` - FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` - CompressedMemory int64 `xml:"compressedMemory,omitempty"` - UptimeSeconds int32 `xml:"uptimeSeconds,omitempty"` - SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` + 
OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` + OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty"` + OverallCpuReadiness int32 `xml:"overallCpuReadiness,omitempty"` + GuestMemoryUsage int32 `xml:"guestMemoryUsage,omitempty"` + HostMemoryUsage int32 `xml:"hostMemoryUsage,omitempty"` + GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` + DistributedCpuEntitlement int32 `xml:"distributedCpuEntitlement,omitempty"` + DistributedMemoryEntitlement int32 `xml:"distributedMemoryEntitlement,omitempty"` + StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` + StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` + GrantedMemory int32 `xml:"grantedMemory,omitempty"` + PrivateMemory int32 `xml:"privateMemory,omitempty"` + SharedMemory int32 `xml:"sharedMemory,omitempty"` + SwappedMemory int32 `xml:"swappedMemory,omitempty"` + BalloonedMemory int32 `xml:"balloonedMemory,omitempty"` + ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty"` + FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty"` + FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty"` + FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` + CompressedMemory int64 `xml:"compressedMemory,omitempty"` + UptimeSeconds int32 `xml:"uptimeSeconds,omitempty"` + SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` + ActiveMemory int32 `xml:"activeMemory,omitempty"` + MemoryTierStats []VirtualMachineQuickStatsMemoryTierStats `xml:"memoryTierStats,omitempty"` } func init() { t["VirtualMachineQuickStats"] = reflect.TypeOf((*VirtualMachineQuickStats)(nil)).Elem() } +type VirtualMachineQuickStatsMemoryTierStats struct { + DynamicData + + MemoryTierType string `xml:"memoryTierType"` + ReadBandwidth int64 `xml:"readBandwidth"` +} + +func init() { + t["VirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*VirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() +} + type VirtualMachineRelocateSpec struct { DynamicData @@ -50885,6 +54497,7 @@ type VirtualMachineRelocateSpec struct { Transform VirtualMachineRelocateTransformation `xml:"transform,omitempty"` DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + CryptoSpec BaseCryptoSpec `xml:"cryptoSpec,omitempty,typeattr"` } func init() { @@ -50894,17 +54507,30 @@ func init() { type VirtualMachineRelocateSpecDiskLocator struct { DynamicData - DiskId int32 `xml:"diskId"` - Datastore ManagedObjectReference `xml:"datastore"` - DiskMoveType string `xml:"diskMoveType,omitempty"` - DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + DiskId int32 `xml:"diskId"` + Datastore ManagedObjectReference `xml:"datastore"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Backing *VirtualMachineRelocateSpecDiskLocatorBackingSpec `xml:"backing,omitempty"` + FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr"` } func init() { t["VirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocator)(nil)).Elem() } +type VirtualMachineRelocateSpecDiskLocatorBackingSpec struct { + DynamicData + + Parent *VirtualMachineRelocateSpecDiskLocatorBackingSpec 
`xml:"parent,omitempty"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpecDiskLocatorBackingSpec"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocatorBackingSpec)(nil)).Elem() +} + type VirtualMachineRuntimeInfo struct { DynamicData @@ -50912,6 +54538,7 @@ type VirtualMachineRuntimeInfo struct { Host *ManagedObjectReference `xml:"host,omitempty"` ConnectionState VirtualMachineConnectionState `xml:"connectionState"` PowerState VirtualMachinePowerState `xml:"powerState"` + VmFailoverInProgress *bool `xml:"vmFailoverInProgress"` FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty"` DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty"` ToolsInstallerMounted bool `xml:"toolsInstallerMounted"` @@ -50938,6 +54565,8 @@ type VirtualMachineRuntimeInfo struct { QuiescedForkParent *bool `xml:"quiescedForkParent"` InstantCloneFrozen *bool `xml:"instantCloneFrozen"` CryptoState string `xml:"cryptoState,omitempty"` + SuspendedToMemory *bool `xml:"suspendedToMemory"` + OpNotificationTimeout int64 `xml:"opNotificationTimeout,omitempty"` } func init() { @@ -50986,6 +54615,32 @@ func init() { t["VirtualMachineSerialInfo"] = reflect.TypeOf((*VirtualMachineSerialInfo)(nil)).Elem() } +type VirtualMachineSgxInfo struct { + DynamicData + + EpcSize int64 `xml:"epcSize"` + FlcMode string `xml:"flcMode,omitempty"` + LePubKeyHash string `xml:"lePubKeyHash,omitempty"` + RequireAttestation *bool `xml:"requireAttestation"` +} + +func init() { + t["VirtualMachineSgxInfo"] = reflect.TypeOf((*VirtualMachineSgxInfo)(nil)).Elem() +} + +type VirtualMachineSgxTargetInfo struct { + VirtualMachineTargetInfo + + MaxEpcSize int64 `xml:"maxEpcSize"` + FlcModes []string `xml:"flcModes,omitempty"` + LePubKeyHashes []string `xml:"lePubKeyHashes,omitempty"` + RequireAttestationSupported *bool `xml:"requireAttestationSupported"` +} + +func init() { + t["VirtualMachineSgxTargetInfo"] = reflect.TypeOf((*VirtualMachineSgxTargetInfo)(nil)).Elem() +} + type VirtualMachineSnapshotInfo struct { DynamicData @@ -51113,11 +54768,13 @@ func init() { type VirtualMachineTicket struct { DynamicData - Ticket string `xml:"ticket"` - CfgFile string `xml:"cfgFile"` - Host string `xml:"host,omitempty"` - Port int32 `xml:"port,omitempty"` - SslThumbprint string `xml:"sslThumbprint,omitempty"` + Ticket string `xml:"ticket"` + CfgFile string `xml:"cfgFile"` + Host string `xml:"host,omitempty"` + Port int32 `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + CertThumbprintList []VirtualMachineCertThumbprint `xml:"certThumbprintList,omitempty"` + Url string `xml:"url,omitempty"` } func init() { @@ -51235,6 +54892,72 @@ func init() { t["VirtualMachineVMIROM"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem() } +type VirtualMachineVcpuConfig struct { + DynamicData + + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` +} + +func init() { + t["VirtualMachineVcpuConfig"] = reflect.TypeOf((*VirtualMachineVcpuConfig)(nil)).Elem() +} + +type VirtualMachineVendorDeviceGroupInfo struct { + VirtualMachineTargetInfo + + DeviceGroupName string `xml:"deviceGroupName"` + DeviceGroupDescription string `xml:"deviceGroupDescription,omitempty"` + ComponentDeviceInfo []VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo `xml:"componentDeviceInfo,omitempty"` +} + +func init() { + t["VirtualMachineVendorDeviceGroupInfo"] = 
reflect.TypeOf((*VirtualMachineVendorDeviceGroupInfo)(nil)).Elem() +} + +type VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo struct { + DynamicData + + Type string `xml:"type"` + VendorName string `xml:"vendorName"` + DeviceName string `xml:"deviceName"` + IsConfigurable bool `xml:"isConfigurable"` + Device BaseVirtualDevice `xml:"device,typeattr"` +} + +func init() { + t["VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo"] = reflect.TypeOf((*VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo)(nil)).Elem() +} + +type VirtualMachineVgpuDeviceInfo struct { + VirtualMachineTargetInfo + + DeviceName string `xml:"deviceName"` + DeviceVendorId int64 `xml:"deviceVendorId"` + MaxFbSizeInGib int64 `xml:"maxFbSizeInGib"` + TimeSlicedCapable bool `xml:"timeSlicedCapable"` + MigCapable bool `xml:"migCapable"` + ComputeProfileCapable bool `xml:"computeProfileCapable"` + QuadroProfileCapable bool `xml:"quadroProfileCapable"` +} + +func init() { + t["VirtualMachineVgpuDeviceInfo"] = reflect.TypeOf((*VirtualMachineVgpuDeviceInfo)(nil)).Elem() +} + +type VirtualMachineVgpuProfileInfo struct { + VirtualMachineTargetInfo + + ProfileName string `xml:"profileName"` + DeviceVendorId int64 `xml:"deviceVendorId"` + FbSizeInGib int64 `xml:"fbSizeInGib"` + ProfileSharing string `xml:"profileSharing"` + ProfileClass string `xml:"profileClass"` +} + +func init() { + t["VirtualMachineVgpuProfileInfo"] = reflect.TypeOf((*VirtualMachineVgpuProfileInfo)(nil)).Elem() +} + type VirtualMachineVideoCard struct { VirtualDevice @@ -51250,6 +54973,92 @@ func init() { t["VirtualMachineVideoCard"] = reflect.TypeOf((*VirtualMachineVideoCard)(nil)).Elem() } +type VirtualMachineVirtualDeviceGroups struct { + DynamicData + + DeviceGroup []BaseVirtualMachineVirtualDeviceGroupsDeviceGroup `xml:"deviceGroup,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineVirtualDeviceGroups"] = reflect.TypeOf((*VirtualMachineVirtualDeviceGroups)(nil)).Elem() +} + +type VirtualMachineVirtualDeviceGroupsDeviceGroup struct { + DynamicData + + GroupInstanceKey int32 `xml:"groupInstanceKey"` + DeviceInfo BaseDescription `xml:"deviceInfo,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineVirtualDeviceGroupsDeviceGroup"] = reflect.TypeOf((*VirtualMachineVirtualDeviceGroupsDeviceGroup)(nil)).Elem() +} + +type VirtualMachineVirtualDeviceGroupsVendorDeviceGroup struct { + VirtualMachineVirtualDeviceGroupsDeviceGroup + + DeviceGroupName string `xml:"deviceGroupName"` +} + +func init() { + t["VirtualMachineVirtualDeviceGroupsVendorDeviceGroup"] = reflect.TypeOf((*VirtualMachineVirtualDeviceGroupsVendorDeviceGroup)(nil)).Elem() +} + +type VirtualMachineVirtualDeviceSwap struct { + DynamicData + + LsiToPvscsi *VirtualMachineVirtualDeviceSwapDeviceSwapInfo `xml:"lsiToPvscsi,omitempty"` +} + +func init() { + t["VirtualMachineVirtualDeviceSwap"] = reflect.TypeOf((*VirtualMachineVirtualDeviceSwap)(nil)).Elem() +} + +type VirtualMachineVirtualDeviceSwapDeviceSwapInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + Applicable *bool `xml:"applicable"` + Status string `xml:"status,omitempty"` +} + +func init() { + t["VirtualMachineVirtualDeviceSwapDeviceSwapInfo"] = reflect.TypeOf((*VirtualMachineVirtualDeviceSwapDeviceSwapInfo)(nil)).Elem() +} + +type VirtualMachineVirtualNuma struct { + DynamicData + + CoresPerNumaNode int32 `xml:"coresPerNumaNode,omitempty"` + ExposeVnumaOnCpuHotadd *bool `xml:"exposeVnumaOnCpuHotadd"` +} + +func init() { + t["VirtualMachineVirtualNuma"] = 
reflect.TypeOf((*VirtualMachineVirtualNuma)(nil)).Elem() +} + +type VirtualMachineVirtualNumaInfo struct { + DynamicData + + CoresPerNumaNode int32 `xml:"coresPerNumaNode,omitempty"` + AutoCoresPerNumaNode *bool `xml:"autoCoresPerNumaNode"` + VnumaOnCpuHotaddExposed *bool `xml:"vnumaOnCpuHotaddExposed"` +} + +func init() { + t["VirtualMachineVirtualNumaInfo"] = reflect.TypeOf((*VirtualMachineVirtualNumaInfo)(nil)).Elem() +} + +type VirtualMachineVirtualPMem struct { + DynamicData + + SnapshotMode string `xml:"snapshotMode,omitempty"` +} + +func init() { + t["VirtualMachineVirtualPMem"] = reflect.TypeOf((*VirtualMachineVirtualPMem)(nil)).Elem() +} + type VirtualMachineWindowsQuiesceSpec struct { VirtualMachineGuestQuiesceSpec @@ -51277,7 +55086,8 @@ func init() { type VirtualNVDIMM struct { VirtualDevice - CapacityInMB int64 `xml:"capacityInMB"` + CapacityInMB int64 `xml:"capacityInMB"` + ConfiguredCapacityInMB int64 `xml:"configuredCapacityInMB,omitempty"` } func init() { @@ -51395,6 +55205,20 @@ func init() { t["VirtualPCIPassthrough"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem() } +type VirtualPCIPassthroughAllowedDevice struct { + DynamicData + + VendorId int32 `xml:"vendorId"` + DeviceId int32 `xml:"deviceId"` + SubVendorId int32 `xml:"subVendorId,omitempty"` + SubDeviceId int32 `xml:"subDeviceId,omitempty"` + RevisionId int16 `xml:"revisionId,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughAllowedDevice"] = reflect.TypeOf((*VirtualPCIPassthroughAllowedDevice)(nil)).Elem() +} + type VirtualPCIPassthroughDeviceBackingInfo struct { VirtualDeviceDeviceBackingInfo @@ -51416,6 +55240,45 @@ func init() { t["VirtualPCIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem() } +type VirtualPCIPassthroughDvxBackingInfo struct { + VirtualDeviceBackingInfo + + DeviceClass string `xml:"deviceClass,omitempty"` + ConfigParams []BaseOptionValue `xml:"configParams,omitempty,typeattr"` +} + +func init() { + t["VirtualPCIPassthroughDvxBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDvxBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughDvxBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughDvxBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDvxBackingOption)(nil)).Elem() +} + +type VirtualPCIPassthroughDynamicBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + AllowedDevice []VirtualPCIPassthroughAllowedDevice `xml:"allowedDevice,omitempty"` + CustomLabel string `xml:"customLabel,omitempty"` + AssignedId string `xml:"assignedId,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughDynamicBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughDynamicBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughDynamicBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingOption)(nil)).Elem() +} + type VirtualPCIPassthroughOption struct { VirtualDeviceOption } @@ -51443,7 +55306,10 @@ func init() { type VirtualPCIPassthroughVmiopBackingInfo struct { VirtualPCIPassthroughPluginBackingInfo - Vgpu string `xml:"vgpu,omitempty"` + Vgpu string `xml:"vgpu,omitempty"` + VgpuMigrateDataSizeMB int32 `xml:"vgpuMigrateDataSizeMB,omitempty"` + MigrateSupported *bool `xml:"migrateSupported"` + EnhancedMigrateCapability *bool `xml:"enhancedMigrateCapability"` } func init() { @@ -51582,6 +55448,42 @@ func init() { t["VirtualPointingDeviceOption"] = 
reflect.TypeOf((*VirtualPointingDeviceOption)(nil)).Elem() } +type VirtualPrecisionClock struct { + VirtualDevice +} + +func init() { + t["VirtualPrecisionClock"] = reflect.TypeOf((*VirtualPrecisionClock)(nil)).Elem() +} + +type VirtualPrecisionClockOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPrecisionClockOption"] = reflect.TypeOf((*VirtualPrecisionClockOption)(nil)).Elem() +} + +type VirtualPrecisionClockSystemClockBackingInfo struct { + VirtualDeviceBackingInfo + + Protocol string `xml:"protocol,omitempty"` +} + +func init() { + t["VirtualPrecisionClockSystemClockBackingInfo"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingInfo)(nil)).Elem() +} + +type VirtualPrecisionClockSystemClockBackingOption struct { + VirtualDeviceBackingOption + + Protocol ChoiceOption `xml:"protocol"` +} + +func init() { + t["VirtualPrecisionClockSystemClockBackingOption"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingOption)(nil)).Elem() +} + type VirtualSATAController struct { VirtualController } @@ -51840,6 +55742,7 @@ type VirtualSriovEthernetCard struct { AllowGuestOSMtuChange *bool `xml:"allowGuestOSMtuChange"` SriovBacking *VirtualSriovEthernetCardSriovBackingInfo `xml:"sriovBacking,omitempty"` + DvxBackingInfo *VirtualPCIPassthroughDvxBackingInfo `xml:"dvxBackingInfo,omitempty"` } func init() { @@ -52095,6 +55998,8 @@ func init() { type VirtualVmxnet3 struct { VirtualVmxnet + + Uptv2Enabled *bool `xml:"uptv2Enabled"` } func init() { @@ -52103,6 +56008,8 @@ func init() { type VirtualVmxnet3Option struct { VirtualVmxnetOption + + Uptv2Enabled *BoolOption `xml:"uptv2Enabled,omitempty"` } func init() { @@ -52137,6 +56044,27 @@ func init() { t["VirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() } +type VirtualWDT struct { + VirtualDevice + + RunOnBoot bool `xml:"runOnBoot"` + Running bool `xml:"running"` +} + +func init() { + t["VirtualWDT"] = reflect.TypeOf((*VirtualWDT)(nil)).Elem() +} + +type VirtualWDTOption struct { + VirtualDeviceOption + + RunOnBoot BoolOption `xml:"runOnBoot"` +} + +func init() { + t["VirtualWDTOption"] = reflect.TypeOf((*VirtualWDTOption)(nil)).Elem() +} + type VlanProfile struct { ApplyProfile } @@ -53874,6 +57802,7 @@ type VmfsConfigOption struct { UnmapBandwidthDynamicMin *LongOption `xml:"unmapBandwidthDynamicMin,omitempty"` UnmapBandwidthDynamicMax *LongOption `xml:"unmapBandwidthDynamicMax,omitempty"` UnmapBandwidthIncrement int64 `xml:"unmapBandwidthIncrement,omitempty"` + UnmapBandwidthUltraLow int64 `xml:"unmapBandwidthUltraLow,omitempty"` } func init() { @@ -54126,8 +58055,9 @@ func init() { type VsanClusterConfigInfo struct { DynamicData - Enabled *bool `xml:"enabled"` - DefaultConfig *VsanClusterConfigInfoHostDefaultInfo `xml:"defaultConfig,omitempty"` + Enabled *bool `xml:"enabled"` + DefaultConfig *VsanClusterConfigInfoHostDefaultInfo `xml:"defaultConfig,omitempty"` + VsanEsaEnabled *bool `xml:"vsanEsaEnabled"` } func init() { @@ -54163,6 +58093,17 @@ func init() { t["VsanClusterUuidMismatchFault"] = reflect.TypeOf((*VsanClusterUuidMismatchFault)(nil)).Elem() } +type VsanDatastoreInfo struct { + DatastoreInfo + + MembershipUuid string `xml:"membershipUuid,omitempty"` + AccessGenNo int32 `xml:"accessGenNo,omitempty"` +} + +func init() { + t["VsanDatastoreInfo"] = reflect.TypeOf((*VsanDatastoreInfo)(nil)).Elem() +} + type VsanDiskFault struct { VsanFault @@ -54238,6 +58179,7 @@ type VsanHostConfigInfo struct { StorageInfo *VsanHostConfigInfoStorageInfo `xml:"storageInfo,omitempty"` NetworkInfo 
*VsanHostConfigInfoNetworkInfo `xml:"networkInfo,omitempty"` FaultDomainInfo *VsanHostFaultDomainInfo `xml:"faultDomainInfo,omitempty"` + VsanEsaEnabled *bool `xml:"vsanEsaEnabled"` } func init() { @@ -54651,8 +58593,9 @@ func init() { type VslmCloneSpec struct { VslmMigrateSpec - Name string `xml:"name"` - KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + Name string `xml:"name"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + Metadata []KeyValue `xml:"metadata,omitempty"` } func init() { @@ -54667,6 +58610,8 @@ type VslmCreateSpec struct { BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` CapacityInMB int64 `xml:"capacityInMB"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + Metadata []KeyValue `xml:"metadata,omitempty"` } func init() { @@ -54711,6 +58656,7 @@ type VslmMigrateSpec struct { BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` Consolidate *bool `xml:"consolidate"` + DisksCrypto *DiskCryptoSpec `xml:"disksCrypto,omitempty"` } func init() { @@ -54872,6 +58818,29 @@ func init() { t["VspanSameSessionPortConflictFault"] = reflect.TypeOf((*VspanSameSessionPortConflictFault)(nil)).Elem() } +type VstorageObjectVCenterQueryChangedDiskAreas VstorageObjectVCenterQueryChangedDiskAreasRequestType + +func init() { + t["VstorageObjectVCenterQueryChangedDiskAreas"] = reflect.TypeOf((*VstorageObjectVCenterQueryChangedDiskAreas)(nil)).Elem() +} + +type VstorageObjectVCenterQueryChangedDiskAreasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` + StartOffset int64 `xml:"startOffset"` + ChangeId string `xml:"changeId"` +} + +func init() { + t["VstorageObjectVCenterQueryChangedDiskAreasRequestType"] = reflect.TypeOf((*VstorageObjectVCenterQueryChangedDiskAreasRequestType)(nil)).Elem() +} + +type VstorageObjectVCenterQueryChangedDiskAreasResponse struct { + Returnval DiskChangeInfo `xml:"returnval"` +} + type VvolDatastoreInfo struct { DatastoreInfo @@ -55431,3 +59400,13 @@ type VslmInfrastructureObjectPolicySpec struct { func init() { t["vslmInfrastructureObjectPolicySpec"] = reflect.TypeOf((*VslmInfrastructureObjectPolicySpec)(nil)).Elem() } + +type VslmVClockInfo struct { + DynamicData + + VClockTime int64 `xml:"vClockTime"` +} + +func init() { + t["vslmVClockInfo"] = reflect.TypeOf((*VslmVClockInfo)(nil)).Elem() +} diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go new file mode 100644 index 000000000000..72bc1082b876 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go @@ -0,0 +1,121 @@ +/* + Copyright (c) 2022 VMware, Inc. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import "reflect" + +type ArrayOfPlaceVmsXClusterResultPlacementFaults struct { + PlaceVmsXClusterResultPlacementFaults []PlaceVmsXClusterResultPlacementFaults `xml:"PlaceVmsXClusterResultPlacementFaults,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterResultPlacementFaults"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterResultPlacementFaults)(nil)).Elem() +} + +type ArrayOfPlaceVmsXClusterResultPlacementInfo struct { + PlaceVmsXClusterResultPlacementInfo []PlaceVmsXClusterResultPlacementInfo `xml:"PlaceVmsXClusterResultPlacementInfo,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterResultPlacementInfo"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterResultPlacementInfo)(nil)).Elem() +} + +type ArrayOfPlaceVmsXClusterSpecVmPlacementSpec struct { + PlaceVmsXClusterSpecVmPlacementSpec []PlaceVmsXClusterSpecVmPlacementSpec `xml:"PlaceVmsXClusterSpecVmPlacementSpec,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterSpecVmPlacementSpec"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterSpecVmPlacementSpec)(nil)).Elem() +} + +type PlaceVmsXCluster PlaceVmsXClusterRequestType + +func init() { + t["PlaceVmsXCluster"] = reflect.TypeOf((*PlaceVmsXCluster)(nil)).Elem() +} + +type PlaceVmsXClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + PlacementSpec PlaceVmsXClusterSpec `xml:"placementSpec"` +} + +func init() { + t["PlaceVmsXClusterRequestType"] = reflect.TypeOf((*PlaceVmsXClusterRequestType)(nil)).Elem() +} + +type PlaceVmsXClusterResponse struct { + Returnval PlaceVmsXClusterResult `xml:"returnval"` +} + +type PlaceVmsXClusterResult struct { + DynamicData + + PlacementInfos []PlaceVmsXClusterResultPlacementInfo `xml:"placementInfos,omitempty"` + Faults []PlaceVmsXClusterResultPlacementFaults `xml:"faults,omitempty"` +} + +func init() { + t["PlaceVmsXClusterResult"] = reflect.TypeOf((*PlaceVmsXClusterResult)(nil)).Elem() +} + +type PlaceVmsXClusterResultPlacementFaults struct { + DynamicData + + ResourcePool ManagedObjectReference `xml:"resourcePool"` + VmName string `xml:"vmName"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["PlaceVmsXClusterResultPlacementFaults"] = reflect.TypeOf((*PlaceVmsXClusterResultPlacementFaults)(nil)).Elem() +} + +type PlaceVmsXClusterResultPlacementInfo struct { + DynamicData + + VmName string `xml:"vmName"` + Recommendation ClusterRecommendation `xml:"recommendation"` +} + +func init() { + t["PlaceVmsXClusterResultPlacementInfo"] = reflect.TypeOf((*PlaceVmsXClusterResultPlacementInfo)(nil)).Elem() +} + +type PlaceVmsXClusterSpec struct { + DynamicData + + ResourcePools []ManagedObjectReference `xml:"resourcePools,omitempty"` + VmPlacementSpecs []PlaceVmsXClusterSpecVmPlacementSpec `xml:"vmPlacementSpecs,omitempty"` + HostRecommRequired *bool `xml:"hostRecommRequired"` + DatastoreRecommRequired *bool `xml:"datastoreRecommRequired"` +} + +func init() { + t["PlaceVmsXClusterSpec"] = reflect.TypeOf((*PlaceVmsXClusterSpec)(nil)).Elem() +} + +type PlaceVmsXClusterSpecVmPlacementSpec struct { + DynamicData + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` +} + +func init() { + t["PlaceVmsXClusterSpecVmPlacementSpec"] = reflect.TypeOf((*PlaceVmsXClusterSpecVmPlacementSpec)(nil)).Elem() +} + +const RecommendationReasonCodeXClusterPlacement = RecommendationReasonCode("xClusterPlacement") diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE index 
74487567632c..6a66aea5eafe 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go index 39bbac1d1719..c0c0a588bf5f 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go @@ -16,7 +16,7 @@ import ( ) const ( - // A generic XML header suitable for use with the output of Marshal. + // Header is a generic XML header suitable for use with the output of Marshal. // This is not automatically added to any output of this package, // it is provided as a convenience. Header = `` + "\n" @@ -24,21 +24,21 @@ const ( // Marshal returns the XML encoding of v. // -// Marshal handles an array or slice by marshalling each of the elements. -// Marshal handles a pointer by marshalling the value it points at or, if the -// pointer is nil, by writing nothing. Marshal handles an interface value by -// marshalling the value it contains or, if the interface value is nil, by -// writing nothing. Marshal handles all other data by writing one or more XML +// Marshal handles an array or slice by marshaling each of the elements. +// Marshal handles a pointer by marshaling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshaling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML // elements containing the data. // // The name for the XML elements is taken from, in order of preference: // - the tag on the XMLName field, if the data is a struct -// - the value of the XMLName field of type xml.Name +// - the value of the XMLName field of type Name // - the tag of the struct field used to obtain the data // - the name of the struct field used to obtain the data -// - the name of the marshalled type +// - the name of the marshaled type // -// The XML element for a struct contains marshalled elements for each of the +// The XML element for a struct contains marshaled elements for each of the // exported fields of the struct, with these exceptions: // - the XMLName field, described above, is omitted. // - a field with tag "-" is omitted. @@ -48,10 +48,12 @@ const ( // field name in the XML element. // - a field with tag ",chardata" is written as character data, // not as an XML element. +// - a field with tag ",cdata" is written as character data +// wrapped in one or more tags, not as an XML element. // - a field with tag ",innerxml" is written verbatim, not subject -// to the usual marshalling procedure. +// to the usual marshaling procedure. // - a field with tag ",comment" is written as an XML comment, not -// subject to the usual marshalling procedure. It must not contain +// subject to the usual marshaling procedure. It must not contain // the "--" string within it. // - a field with a tag including the "omitempty" option is omitted // if the field value is empty. 
The empty values are false, 0, any @@ -59,11 +61,18 @@ const ( // string of length zero. // - an anonymous struct field is handled as if the fields of its // value were part of the outer struct. +// - a field implementing Marshaler is written by calling its MarshalXML +// method. +// - a field implementing encoding.TextMarshaler is written by encoding the +// result of its MarshalText method as text. // // If a field uses a tag "a>b>c", then the element c will be nested inside -// parent elements a and b. Fields that appear next to each other that name +// parent elements a and b. Fields that appear next to each other that name // the same parent will be enclosed in one XML element. // +// If the XML name for a struct field is defined by both the field tag and the +// struct's XMLName field, the names must match. +// // See MarshalIndent for an example. // // Marshal will return an error if asked to marshal a channel, function, or map. @@ -173,9 +182,9 @@ func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { } var ( - endComment = []byte("-->") - endProcInst = []byte("?>") - endDirective = []byte(">") + begComment = []byte("") + endProcInst = []byte("?>") ) // EncodeToken writes the given XML token to the stream. @@ -191,6 +200,7 @@ var ( // EncodeToken allows writing a ProcInst with Target set to "xml" only as the first token // in the stream. func (enc *Encoder) EncodeToken(t Token) error { + p := &enc.p switch t := t.(type) { case StartElement: @@ -202,7 +212,7 @@ func (enc *Encoder) EncodeToken(t Token) error { return err } case CharData: - EscapeText(p, t) + escapeText(p, t, false) case Comment: if bytes.Contains(t, endComment) { return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") @@ -213,7 +223,7 @@ func (enc *Encoder) EncodeToken(t Token) error { return p.cachedWriteError() case ProcInst: // First token to be encoded which is also a ProcInst with target of xml - // is the xml declaration. The only ProcInst where target of xml is allowed. + // is the xml declaration. The only ProcInst where target of xml is allowed. if t.Target == "xml" && p.Buffered() != 0 { return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") } @@ -231,16 +241,59 @@ func (enc *Encoder) EncodeToken(t Token) error { } p.WriteString("?>") case Directive: - if bytes.Contains(t, endDirective) { - return fmt.Errorf("xml: EncodeToken of Directive containing > marker") + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") } p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + } return p.cachedWriteError() } +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
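+// For example, the directive text `DOCTYPE doc [ <!ELEMENT doc (#PCDATA)> ]`
+// is valid because the inner "<" and ">" pair up, while brackets that appear
+// inside quoted strings or <!-- comments --> are ignored; a bare, unmatched
+// ">" at depth zero makes the directive invalid.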
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + // Flush flushes any buffered XML to the underlying writer. // See the EncodeToken documentation for details about when it is necessary. func (enc *Encoder) Flush() error { @@ -274,7 +327,7 @@ func (p *printer) createAttrPrefix(url string) string { // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", // but users should not be trying to use that one directly - that's our job.) if url == xmlURL { - return "xml" + return xmlPrefix } // Need to define a new name space. @@ -426,8 +479,11 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat xmlname := tinfo.xmlname if xmlname.name != "" { start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name - } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { - start.Name = v + } else { + fv := xmlname.value(val, dontInitNilPointers) + if v, ok := fv.Interface().(Name); ok && v.Local != "" { + start.Name = v + } } } if start.Name.Local == "" && finfo != nil { @@ -452,8 +508,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat if finfo.flags&fAttr == 0 { continue } - fv := finfo.value(val) - name := Name{Space: finfo.xmlns, Local: finfo.name} + fv := finfo.value(val, dontInitNilPointers) if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { continue @@ -463,69 +518,10 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat continue } - if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { - attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - continue - } - - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { - attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - continue - } - } - - if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { - text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - start.Attr = append(start.Attr, Attr{name, string(text)}) - continue - } - - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - start.Attr = append(start.Attr, Attr{name, string(text)}) - continue - } - } - - // Dereference or skip nil pointer, interface values. 
- switch fv.Kind() { - case reflect.Ptr, reflect.Interface: - if fv.IsNil() { - continue - } - fv = fv.Elem() - } - - s, b, err := p.marshalSimple(fv.Type(), fv) - if err != nil { + name := Name{Space: finfo.xmlns, Local: finfo.name} + if err := p.marshalAttr(&start, name, fv); err != nil { return err } - if b != nil { - s = string(b) - } - start.Attr = append(start.Attr, Attr{name, s}) } if err := p.writeStart(&start); err != nil { @@ -555,6 +551,90 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat return p.cachedWriteError() } +// marshalAttr marshals an attribute with the given name and value, adding to start.Attr. +func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error { + if val.CanInterface() && val.Type().Implements(marshalerAttrType) { + attr, err := val.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } + } + + if val.CanInterface() && val.Type().Implements(textMarshalerType) { + text, err := val.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + } + + // Dereference or skip nil pointer, interface values. + switch val.Kind() { + case reflect.Ptr, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + + // Walk slices. + if val.Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { + n := val.Len() + for i := 0; i < n; i++ { + if err := p.marshalAttr(start, name, val.Index(i)); err != nil { + return err + } + } + return nil + } + + if val.Type() == attrType { + start.Attr = append(start.Attr, val.Interface().(Attr)) + return nil + } + + s, b, err := p.marshalSimple(val.Type(), val) + if err != nil { + return err + } + if b != nil { + s = string(b) + } + start.Attr = append(start.Attr, Attr{name, s}) + return nil +} + // defaultStart returns the default start element to use, // given the reflect type, field info, and start template. func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { @@ -716,6 +796,20 @@ func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, [] var ddBytes = []byte("--") +// indirect drills into interfaces and pointers, returning the pointed-at value. +// If it encounters a nil interface or pointer, indirect returns that nil value. +// This can turn into an infinite loop given a cyclic chain, +// but it matches the Go 1 behavior. 
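+// For example, given a *string pointing at "x", indirect returns the string
+// value "x"; given a nil *string, it returns that nil pointer value unchanged.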
+func indirect(vf reflect.Value) reflect.Value { + for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Ptr { + if vf.IsNil() { + return vf + } + vf = vf.Elem() + } + return vf +} + func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { s := parentStack{p: p} for i := range tinfo.fields { @@ -723,24 +817,30 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if finfo.flags&fAttr != 0 { continue } - vf := finfo.value(val) - - // Dereference or skip nil pointer, interface values. - switch vf.Kind() { - case reflect.Ptr, reflect.Interface: - if !vf.IsNil() { - vf = vf.Elem() - } + vf := finfo.value(val, dontInitNilPointers) + if !vf.IsValid() { + // The field is behind an anonymous struct field that's + // nil. Skip it. + continue } switch finfo.flags & fMode { - case fCharData: + case fCDATA, fCharData: + emit := EscapeText + if finfo.flags&fMode == fCDATA { + emit = emitCDATA + } + if err := s.trim(finfo.parents); err != nil { + return err + } if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } - Escape(p, data) + if err := emit(p, data); err != nil { + return err + } continue } if vf.CanAddr() { @@ -750,27 +850,39 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if err != nil { return err } - Escape(p, data) + if err := emit(p, data); err != nil { + return err + } continue } } + var scratch [64]byte + vf = indirect(vf) switch vf.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + if err := emit(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)); err != nil { + return err + } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + if err := emit(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)); err != nil { + return err + } case reflect.Float32, reflect.Float64: - Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + if err := emit(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())); err != nil { + return err + } case reflect.Bool: - Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + if err := emit(p, strconv.AppendBool(scratch[:0], vf.Bool())); err != nil { + return err + } case reflect.String: - if err := EscapeText(p, []byte(vf.String())); err != nil { + if err := emit(p, []byte(vf.String())); err != nil { return err } case reflect.Slice: if elem, ok := vf.Interface().([]byte); ok { - if err := EscapeText(p, elem); err != nil { + if err := emit(p, elem); err != nil { return err } } @@ -778,6 +890,10 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { continue case fComment: + if err := s.trim(finfo.parents); err != nil { + return err + } + vf = indirect(vf) k := vf.Kind() if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) @@ -792,14 +908,14 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { switch k { case reflect.String: s := vf.String() - dashDash = strings.Index(s, "--") >= 0 + dashDash = strings.Contains(s, "--") dashLast = s[len(s)-1] == '-' if !dashDash { p.WriteString(s) } case reflect.Slice: b := vf.Bytes() - dashDash = bytes.Index(b, ddBytes) >= 0 + dashDash = 
bytes.Contains(b, ddBytes) dashLast = b[len(b)-1] == '-' if !dashDash { p.Write(b) @@ -817,7 +933,8 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { p.WriteString("-->") continue - case fInnerXml: + case fInnerXML: + vf = indirect(vf) iface := vf.Interface() switch raw := iface.(type) { case []byte: @@ -891,8 +1008,8 @@ type parentStack struct { } // trim updates the XML context to match the longest common prefix of the stack -// and the given parents. A closing tag will be written for every parent -// popped. Passing a zero slice or nil will close all the elements. +// and the given parents. A closing tag will be written for every parent +// popped. Passing a zero slice or nil will close all the elements. func (s *parentStack) trim(parents []string) error { split := 0 for ; split < len(parents) && split < len(s.stack); split++ { @@ -905,7 +1022,7 @@ func (s *parentStack) trim(parents []string) error { return err } } - s.stack = parents[:split] + s.stack = s.stack[:split] return nil } @@ -920,7 +1037,7 @@ func (s *parentStack) push(parents []string) error { return nil } -// A MarshalXMLError is returned when Marshal encounters a type +// UnsupportedTypeError is returned when Marshal encounters a type // that cannot be converted into XML. type UnsupportedTypeError struct { Type reflect.Type diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/read.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/read.go index fe35fce6ca45..ea61352f6db3 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/read.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/read.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -27,7 +27,7 @@ import ( // discarded. // // Because Unmarshal uses the reflect package, it can only assign -// to exported (upper case) fields. Unmarshal uses a case-sensitive +// to exported (upper case) fields. Unmarshal uses a case-sensitive // comparison to match XML element names to tag values and struct // field names. // @@ -37,9 +37,9 @@ import ( // // * If the struct has a field of type []byte or string with tag // ",innerxml", Unmarshal accumulates the raw XML nested inside the -// element in that field. The rest of the rules still apply. +// element in that field. The rest of the rules still apply. // -// * If the struct has a field named XMLName of type xml.Name, +// * If the struct has a field named XMLName of type Name, // Unmarshal records the element name in that field. // // * If the XMLName field has an associated tag of the form @@ -52,6 +52,11 @@ import ( // the explicit name in a struct field tag of the form "name,attr", // Unmarshal records the attribute value in that field. // +// * If the XML element has an attribute not handled by the previous +// rule and the struct has a field with an associated tag containing +// ",any,attr", Unmarshal records the attribute value in the first +// such field. +// // * If the XML element contains character data, that data is // accumulated in the first struct field that has tag ",chardata". // The struct field may have type []byte or string. @@ -59,7 +64,7 @@ import ( // // * If the XML element contains comments, they are accumulated in // the first struct field that has tag ",comment". 
The struct -// field may have type []byte or string. If there is no such +// field may have type []byte or string. If there is no such // field, the comments are discarded. // // * If the XML element contains a sub-element whose name matches @@ -85,7 +90,12 @@ import ( // * An anonymous struct field is handled as if the fields of its // value were part of the outer struct. // -// * A struct field with tag "-" is never unmarshalled into. +// * A struct field with tag "-" is never unmarshaled into. +// +// If Unmarshal encounters a field type that implements the Unmarshaler +// interface, Unmarshal calls its UnmarshalXML method to produce the value from +// the XML element. Otherwise, if the value implements +// encoding.TextUnmarshaler, Unmarshal calls that value's UnmarshalText method. // // Unmarshal maps an XML element to a string or []byte by saving the // concatenation of that element's character data in the string or @@ -94,34 +104,42 @@ import ( // Unmarshal maps an attribute value to a string or []byte by saving // the value in the string or slice. // -// Unmarshal maps an XML element to a slice by extending the length of -// the slice and mapping the element to the newly created value. +// Unmarshal maps an attribute value to an Attr by saving the attribute, +// including its name, in the Attr. +// +// Unmarshal maps an XML element or attribute value to a slice by +// extending the length of the slice and mapping the element or attribute +// to the newly created value. // // Unmarshal maps an XML element or attribute value to a bool by -// setting it to the boolean value represented by the string. +// setting it to the boolean value represented by the string. Whitespace +// is trimmed and ignored. // // Unmarshal maps an XML element or attribute value to an integer or // floating-point field by setting the field to the result of -// interpreting the string value in decimal. There is no check for -// overflow. +// interpreting the string value in decimal. There is no check for +// overflow. Whitespace is trimmed and ignored. // -// Unmarshal maps an XML element to an xml.Name by recording the -// element name. +// Unmarshal maps an XML element to a Name by recording the element +// name. // // Unmarshal maps an XML element to a pointer by setting the pointer // to a freshly allocated value and then mapping the element to that value. // +// A missing element or empty attribute value will be unmarshaled as a zero value. +// If the field is a slice, a zero value will be appended to the field. Otherwise, the +// field will be set to its zero value. func Unmarshal(data []byte, v interface{}) error { return NewDecoder(bytes.NewReader(data)).Decode(v) } -// Decode works like xml.Unmarshal, except it reads the decoder +// Decode works like Unmarshal, except it reads the decoder // stream to find the start element. func (d *Decoder) Decode(v interface{}) error { return d.DecodeElement(v, nil) } -// DecodeElement works like xml.Unmarshal except that it takes +// DecodeElement works like Unmarshal except that it takes // a pointer to the start XML element to decode into v. // It is useful when a client reads some raw XML tokens itself // but also wants to defer to Unmarshal for some elements. @@ -133,7 +151,7 @@ func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { return d.unmarshal(val.Elem(), start) } -// An UnmarshalError represents an error in the unmarshalling process. +// An UnmarshalError represents an error in the unmarshaling process. 
type UnmarshalError string func (e UnmarshalError) Error() string { return string(e) } @@ -148,7 +166,7 @@ func (e UnmarshalError) Error() string { return string(e) } // UnmarshalXML must consume exactly one XML element. // One common implementation strategy is to unmarshal into // a separate value with a layout matching the expected XML -// using d.DecodeElement, and then to copy the data from +// using d.DecodeElement, and then to copy the data from // that value into the receiver. // Another common strategy is to use d.Token to process the // XML object one token at a time. @@ -180,19 +198,19 @@ func receiverType(val interface{}) string { // unmarshalInterface unmarshals a single XML element into val. // start is the opening tag of the element. -func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { +func (d *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { // Record that decoder must stop at end tag corresponding to start. - p.pushEOF() + d.pushEOF() - p.unmarshalDepth++ - err := val.UnmarshalXML(p, *start) - p.unmarshalDepth-- + d.unmarshalDepth++ + err := val.UnmarshalXML(d, *start) + d.unmarshalDepth-- if err != nil { - p.popEOF() + d.popEOF() return err } - if !p.popEOF() { + if !d.popEOF() { return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) } @@ -202,11 +220,11 @@ func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error // unmarshalTextInterface unmarshals a single XML element into val. // The chardata contained in the element (but not its children) // is passed to the text unmarshaler. -func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { +func (d *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler) error { var buf []byte depth := 1 for depth > 0 { - t, err := p.Token() + t, err := d.Token() if err != nil { return err } @@ -225,14 +243,13 @@ func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *St } // unmarshalAttr unmarshals a single XML attribute into val. -func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { +func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { if val.Kind() == reflect.Ptr { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. @@ -258,11 +275,30 @@ func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { } } - copyValue(val, []byte(attr.Value)) - return nil + if val.Type().Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { + // Slice of element values. + // Grow slice. + n := val.Len() + val.Set(reflect.Append(val, reflect.Zero(val.Type().Elem()))) + + // Recur to read element into slice. + if err := d.unmarshalAttr(val.Index(n), attr); err != nil { + val.SetLen(n) + return err + } + return nil + } + + if val.Type() == attrType { + val.Set(reflect.ValueOf(attr)) + return nil + } + + return copyValue(val, []byte(attr.Value)) } var ( + attrType = reflect.TypeOf(Attr{}) unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() @@ -271,21 +307,9 @@ var ( // Find reflect.Type for an element's type attribute. 
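+// (govmomi extension, not in upstream encoding/xml: an element's xsi:type
+// attribute names one of the types registered above in vim25/types (the
+// t[...] = reflect.TypeOf(...) entries), and the decoder uses that name to
+// pick the concrete Go type when decoding into an interface-typed field.)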
func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type { t := "" - for i, a := range start.Attr { + for _, a := range start.Attr { if a.Name == xmlSchemaInstance || a.Name == xsiType { t = a.Value - // HACK: ensure xsi:type is last in the list to avoid using that value for - // a "type" attribute, such as ManagedObjectReference.Type for example. - // Note that xsi:type is already the last attribute in VC/ESX responses. - // This is only an issue with govmomi simulator generated responses. - // Proper fix will require finding a few needles in this xml package haystack. - // Note: govmomi uses xmlSchemaInstance, other clients (e.g. rbvmomi) use xsiType. - // They are the same thing to XML parsers, but not to this hack here. - x := len(start.Attr) - 1 - if i != x { - start.Attr[i] = start.Attr[x] - start.Attr[x] = a - } break } } @@ -312,11 +336,11 @@ func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect } // Unmarshal a single XML element into val. -func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { +func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Find start element if we need it. if start == nil { for { - tok, err := p.Token() + tok, err := d.Token() if err != nil { return err } @@ -329,10 +353,10 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Try to figure out type for empty interface values. if val.Kind() == reflect.Interface && val.IsNil() { - typ := p.typeForElement(val, start) + typ := d.typeForElement(val, start) if typ != nil { pval := reflect.New(typ).Elem() - err := p.unmarshal(pval, start) + err := d.unmarshal(pval, start) if err != nil { return err } @@ -371,24 +395,24 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { if val.CanInterface() && val.Type().Implements(unmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + return d.unmarshalInterface(val.Interface().(Unmarshaler), start) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { - return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) } } if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) } } @@ -414,7 +438,7 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // TODO: For now, simply ignore the field. In the near // future we may choose to unmarshal the start // element on it, if not nil. - return p.Skip() + return d.Skip() case reflect.Slice: typ := v.Type() @@ -427,19 +451,10 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Slice of element values. // Grow slice. 
n := v.Len() - if n >= v.Cap() { - ncap := 2 * n - if ncap < 4 { - ncap = 4 - } - new := reflect.MakeSlice(typ, n, ncap) - reflect.Copy(new, v) - v.Set(new) - } - v.SetLen(n + 1) + v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem()))) // Recur to read element into slice. - if err := p.unmarshal(v.Index(n), start); err != nil { + if err := d.unmarshal(v.Index(n), start); err != nil { v.SetLen(n) return err } @@ -476,52 +491,74 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { } return UnmarshalError(e) } - fv := finfo.value(sv) + fv := finfo.value(sv, initNilPointers) if _, ok := fv.Interface().(Name); ok { fv.Set(reflect.ValueOf(start.Name)) } } // Assign attributes. - // Also, determine whether we need to save character data or comments. - for i := range tinfo.fields { - finfo := &tinfo.fields[i] - switch finfo.flags & fMode { - case fAttr: - strv := finfo.value(sv) - // Look for attribute. - for _, a := range start.Attr { + for _, a := range start.Attr { + handled := false + any := -1 + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv, initNilPointers) if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { - if err := p.unmarshalAttr(strv, a); err != nil { - return err + needTypeAttr := (finfo.flags & fTypeAttr) != 0 + // HACK: avoid using xsi:type value for a "type" attribute, such as ManagedObjectReference.Type for example. + if needTypeAttr || (a.Name != xmlSchemaInstance && a.Name != xsiType) { + if err := d.unmarshalAttr(strv, a); err != nil { + return err + } } - break + handled = true } + + case fAny | fAttr: + if any == -1 { + any = i + } + } + } + if !handled && any >= 0 { + finfo := &tinfo.fields[any] + strv := finfo.value(sv, initNilPointers) + if err := d.unmarshalAttr(strv, a); err != nil { + return err } + } + } - case fCharData: + // Determine whether we need to save character data or comments. 
+ for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fCDATA, fCharData: if !saveData.IsValid() { - saveData = finfo.value(sv) + saveData = finfo.value(sv, initNilPointers) } case fComment: if !saveComment.IsValid() { - saveComment = finfo.value(sv) + saveComment = finfo.value(sv, initNilPointers) } case fAny, fAny | fElement: if !saveAny.IsValid() { - saveAny = finfo.value(sv) + saveAny = finfo.value(sv, initNilPointers) } - case fInnerXml: + case fInnerXML: if !saveXML.IsValid() { - saveXML = finfo.value(sv) - if p.saved == nil { + saveXML = finfo.value(sv, initNilPointers) + if d.saved == nil { saveXMLIndex = 0 - p.saved = new(bytes.Buffer) + d.saved = new(bytes.Buffer) } else { - saveXMLIndex = p.savedOffset() + saveXMLIndex = d.savedOffset() } } } @@ -534,9 +571,9 @@ Loop: for { var savedOffset int if saveXML.IsValid() { - savedOffset = p.savedOffset() + savedOffset = d.savedOffset() } - tok, err := p.Token() + tok, err := d.Token() if err != nil { return err } @@ -544,28 +581,28 @@ Loop: case StartElement: consumed := false if sv.IsValid() { - consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + consumed, err = d.unmarshalPath(tinfo, sv, nil, &t) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true - if err := p.unmarshal(saveAny, &t); err != nil { + if err := d.unmarshal(saveAny, &t); err != nil { return err } } } if !consumed { - if err := p.Skip(); err != nil { + if err := d.Skip(); err != nil { return err } } case EndElement: if saveXML.IsValid() { - saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + saveXMLData = d.saved.Bytes()[saveXMLIndex:savedOffset] if saveXMLIndex == 0 { - p.saved = nil + d.saved = nil } } break Loop @@ -614,7 +651,9 @@ Loop: case reflect.String: t.SetString(string(saveXMLData)) case reflect.Slice: - t.Set(reflect.ValueOf(saveXMLData)) + if t.Type().Elem().Kind() == reflect.Uint8 { + t.Set(reflect.ValueOf(saveXMLData)) + } } return nil @@ -637,7 +676,11 @@ func copyValue(dst reflect.Value, src []byte) (err error) { default: return errors.New("cannot unmarshal into " + dst0.Type().String()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if len(src) == 0 { + dst.SetInt(0) + return nil + } + itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } @@ -663,19 +706,32 @@ func copyValue(dst reflect.Value, src []byte) (err error) { utmp = uint64(uint64(itmp)) } } else { - utmp, err = strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if len(src) == 0 { + dst.SetUint(0) + return nil + } + + utmp, err = strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } } dst.SetUint(utmp) case reflect.Float32, reflect.Float64: - ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if len(src) == 0 { + dst.SetFloat(0) + return nil + } + ftmp, err := strconv.ParseFloat(strings.TrimSpace(string(src)), dst.Type().Bits()) if err != nil { return err } dst.SetFloat(ftmp) case reflect.Bool: + if len(src) == 0 { + dst.SetBool(false) + return nil + } value, err := strconv.ParseBool(strings.TrimSpace(string(src))) if err != nil { return err @@ -698,7 +754,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) { // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is 
uninteresting for sv's fields. -func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { +func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { @@ -713,7 +769,7 @@ Loop: } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. - return true, p.unmarshal(finfo.value(sv), start) + return true, d.unmarshal(finfo.value(sv, initNilPointers), start) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. Break and recurse @@ -736,18 +792,18 @@ Loop: // prefix. Recurse and attempt to match these. for { var tok Token - tok, err = p.Token() + tok, err = d.Token() if err != nil { return true, err } switch t := tok.(type) { case StartElement: - consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t) if err != nil { return true, err } if !consumed2 { - if err := p.Skip(); err != nil { + if err := d.Skip(); err != nil { return true, err } } diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go index 086e83b69913..5eb0b4e5fdff 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -31,37 +31,37 @@ type fieldFlags int const ( fElement fieldFlags = 1 << iota fAttr + fCDATA fCharData - fInnerXml + fInnerXML fComment fAny fOmitEmpty fTypeAttr - fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny + fMode = fElement | fAttr | fCDATA | fCharData | fInnerXML | fComment | fAny + + xmlName = "XMLName" ) -var tinfoMap = make(map[reflect.Type]*typeInfo) -var tinfoLock sync.RWMutex +var tinfoMap sync.Map // map[reflect.Type]*typeInfo var nameType = reflect.TypeOf(Name{}) // getTypeInfo returns the typeInfo structure with details necessary -// for marshalling and unmarshalling typ. +// for marshaling and unmarshaling typ. func getTypeInfo(typ reflect.Type) (*typeInfo, error) { - tinfoLock.RLock() - tinfo, ok := tinfoMap[typ] - tinfoLock.RUnlock() - if ok { - return tinfo, nil + if ti, ok := tinfoMap.Load(typ); ok { + return ti.(*typeInfo), nil } - tinfo = &typeInfo{} + + tinfo := &typeInfo{} if typ.Kind() == reflect.Struct && typ != nameType { n := typ.NumField() for i := 0; i < n; i++ { f := typ.Field(i) - if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + if (f.PkgPath != "" && !f.Anonymous) || f.Tag.Get("xml") == "-" { continue // Private field } @@ -94,7 +94,7 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) { return nil, err } - if f.Name == "XMLName" { + if f.Name == xmlName { tinfo.xmlname = finfo continue } @@ -105,10 +105,9 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) { } } } - tinfoLock.Lock() - tinfoMap[typ] = tinfo - tinfoLock.Unlock() - return tinfo, nil + + ti, _ := tinfoMap.LoadOrStore(typ, tinfo) + return ti.(*typeInfo), nil } // structFieldInfo builds and returns a fieldInfo for f. 
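// Illustrative standalone sketch of the caching idiom the getTypeInfo hunk
// above switches to: sync.Map with Load on the fast path and LoadOrStore on
// the slow path, so concurrent callers end up sharing one cached value. The
// names typeDescriptor and getDescriptor are hypothetical, not from the
// vendored code.
package main

import (
	"fmt"
	"reflect"
	"sync"
)

type typeDescriptor struct{ name string }

var descCache sync.Map // map[reflect.Type]*typeDescriptor

func getDescriptor(t reflect.Type) *typeDescriptor {
	// Fast path: a descriptor was already stored for this type.
	if d, ok := descCache.Load(t); ok {
		return d.(*typeDescriptor)
	}
	// Slow path: build a candidate, then LoadOrStore so that racing
	// goroutines all end up using the same stored instance.
	d, _ := descCache.LoadOrStore(t, &typeDescriptor{name: t.String()})
	return d.(*typeDescriptor)
}

func main() {
	fmt.Println(getDescriptor(reflect.TypeOf(struct{ A int }{})).name)
}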
@@ -131,10 +130,12 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro switch flag { case "attr": finfo.flags |= fAttr + case "cdata": + finfo.flags |= fCDATA case "chardata": finfo.flags |= fCharData case "innerxml": - finfo.flags |= fInnerXml + finfo.flags |= fInnerXML case "comment": finfo.flags |= fComment case "any": @@ -151,8 +152,8 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro switch mode := finfo.flags & fMode; mode { case 0: finfo.flags |= fElement - case fAttr, fCharData, fInnerXml, fComment, fAny: - if f.Name == "XMLName" || tag != "" && mode != fAttr { + case fAttr, fCDATA, fCharData, fInnerXML, fComment, fAny, fAny | fAttr: + if f.Name == xmlName || tag != "" && mode != fAttr { valid = false } default: @@ -177,7 +178,7 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro f.Name, typ, f.Tag.Get("xml")) } - if f.Name == "XMLName" { + if f.Name == xmlName { // The XMLName field records the XML element name. Don't // process it as usual because its name should default to // empty rather than to the field name. @@ -214,7 +215,7 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro } // If the field type has an XMLName field, the names must match - // so that the behavior of both marshalling and unmarshalling + // so that the behavior of both marshaling and unmarshaling // is straightforward and unambiguous. if finfo.flags&fElement != 0 { ftyp := f.Type @@ -239,11 +240,11 @@ func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { } for i, n := 0, typ.NumField(); i < n; i++ { f := typ.Field(i) - if f.Name != "XMLName" { + if f.Name != xmlName { continue } finfo, err := structFieldInfo(typ, &f) - if finfo.name != "" && err == nil { + if err == nil && finfo.name != "" { return finfo } // Also consider errors as a non-existent field tag @@ -334,7 +335,7 @@ Loop: return nil } -// A TagPathError represents an error in the unmarshalling process +// A TagPathError represents an error in the unmarshaling process // caused by the use of field tags with conflicting paths. type TagPathError struct { Struct reflect.Type @@ -346,15 +347,25 @@ func (e *TagPathError) Error() string { return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) } +const ( + initNilPointers = true + dontInitNilPointers = false +) + // value returns v's field value corresponding to finfo. -// It's equivalent to v.FieldByIndex(finfo.idx), but initializes -// and dereferences pointers as necessary. -func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { +// It's equivalent to v.FieldByIndex(finfo.idx), but when passed +// initNilPointers, it initializes and dereferences pointers as necessary. +// When passed dontInitNilPointers and a nil pointer is reached, the function +// returns a zero reflect.Value. 
+func (finfo *fieldInfo) value(v reflect.Value, shouldInitNilPointers bool) reflect.Value { for i, x := range finfo.idx { if i > 0 { t := v.Type() if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { if v.IsNil() { + if !shouldInitNilPointers { + return reflect.Value{} + } v.Set(reflect.New(v.Type().Elem())) } v = v.Elem() diff --git a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/xml.go b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/xml.go index 6c6c5c8212b8..28631bdfe4aa 100644 --- a/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/xml.go +++ b/cluster-autoscaler/vendor/github.com/vmware/govmomi/vim25/xml/xml.go @@ -7,8 +7,8 @@ package xml // References: -// Annotated XML spec: http://www.xml.com/axml/testaxml.htm -// XML name spaces: http://www.w3.org/TR/REC-xml-names/ +// Annotated XML spec: https://www.xml.com/axml/testaxml.htm +// XML name spaces: https://www.w3.org/TR/REC-xml-names/ // TODO(rsc): // Test error handling. @@ -61,6 +61,7 @@ type StartElement struct { Attr []Attr } +// Copy creates a new copy of StartElement. func (e StartElement) Copy() StartElement { attrs := make([]Attr, len(e.Attr)) copy(attrs, e.Attr) @@ -89,12 +90,14 @@ func makeCopy(b []byte) []byte { return b1 } +// Copy creates a new copy of CharData. func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } // A Comment represents an XML comment of the form . // The bytes do not include the comment markers. type Comment []byte +// Copy creates a new copy of Comment. func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } // A ProcInst represents an XML processing instruction of the form @@ -103,6 +106,7 @@ type ProcInst struct { Inst []byte } +// Copy creates a new copy of ProcInst. func (p ProcInst) Copy() ProcInst { p.Inst = makeCopy(p.Inst) return p @@ -112,6 +116,7 @@ func (p ProcInst) Copy() ProcInst { // The bytes do not include the markers. type Directive []byte +// Copy creates a new copy of Directive. func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } // CopyToken returns a copy of a Token. @@ -131,6 +136,23 @@ func CopyToken(t Token) Token { return t } +// A TokenReader is anything that can decode a stream of XML tokens, including a +// Decoder. +// +// When Token encounters an error or end-of-file condition after successfully +// reading a token, it returns the token. It may return the (non-nil) error from +// the same call or return the error (and a nil token) from a subsequent call. +// An instance of this general case is that a TokenReader returning a non-nil +// token at the end of the token stream may return either io.EOF or a nil error. +// The next Read should return nil, io.EOF. +// +// Implementations of Token are discouraged from returning a nil token with a +// nil error. Callers should treat a return of nil, nil as indicating that +// nothing happened; in particular it does not indicate EOF. +type TokenReader interface { + Token() (Token, error) +} + // A Decoder represents an XML parser reading a particular input stream. // The parser assumes that its input is encoded in UTF-8. type Decoder struct { @@ -146,9 +168,9 @@ type Decoder struct { // // Setting: // - // d.Strict = false; - // d.AutoClose = HTMLAutoClose; - // d.Entity = HTMLEntity + // d.Strict = false + // d.AutoClose = xml.HTMLAutoClose + // d.Entity = xml.HTMLEntity // // creates a parser that can handle typical HTML. 
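// Illustrative sketch of the lenient-decoder configuration documented just
// above (Strict, AutoClose, Entity), written against the standard
// encoding/xml API that this vendored fork tracks; the input string is
// arbitrary example data.
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<p>caf&eacute;<br>done</p>`))
	d.Strict = false                // tolerate common HTML-style mistakes
	d.AutoClose = xml.HTMLAutoClose // treat <br> as self-closing
	d.Entity = xml.HTMLEntity       // resolve HTML entities such as &eacute;

	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("token error:", err)
			return
		}
		fmt.Printf("%T\n", tok)
	}
}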
// @@ -177,7 +199,7 @@ type Decoder struct { // charset-conversion readers, converting from the provided // non-UTF-8 charset into UTF-8. If CharsetReader is nil or // returns an error, parsing stops with an error. One of the - // the CharsetReader's result values must be non-nil. + // CharsetReader's result values must be non-nil. CharsetReader func(charset string, input io.Reader) (io.Reader, error) // DefaultSpace sets the default name space used for unadorned tags, @@ -189,6 +211,7 @@ type Decoder struct { TypeFunc func(string) (reflect.Type, bool) r io.ByteReader + t TokenReader buf bytes.Buffer saved *bytes.Buffer stk *stack @@ -200,6 +223,7 @@ type Decoder struct { ns map[string]string err error line int + offset int64 unmarshalDepth int } @@ -217,12 +241,28 @@ func NewDecoder(r io.Reader) *Decoder { return d } +// NewTokenDecoder creates a new XML parser using an underlying token stream. +func NewTokenDecoder(t TokenReader) *Decoder { + // Is it already a Decoder? + if d, ok := t.(*Decoder); ok { + return d + } + d := &Decoder{ + ns: make(map[string]string), + t: t, + nextByte: -1, + line: 1, + Strict: true, + } + return d +} + // Token returns the next XML token in the input stream. // At the end of the input stream, Token returns nil, io.EOF. // // Slices of bytes in the returned token data refer to the // parser's internal buffer and remain valid only until the next -// call to Token. To acquire a copy of the bytes, call CopyToken +// call to Token. To acquire a copy of the bytes, call CopyToken // or the token's Copy method. // // Token expands self-closing elements such as
@@ -230,25 +270,33 @@ func NewDecoder(r io.Reader) *Decoder { // // Token guarantees that the StartElement and EndElement // tokens it returns are properly nested and matched: -// if Token encounters an unexpected end element, +// if Token encounters an unexpected end element +// or EOF before all expected end elements, // it will return an error. // // Token implements XML name spaces as described by -// http://www.w3.org/TR/REC-xml-names/. Each of the +// https://www.w3.org/TR/REC-xml-names/. Each of the // Name structures contained in the Token has the Space // set to the URL identifying its name space when known. // If Token encounters an unrecognized name space prefix, // it uses the prefix as the Space rather than report an error. -func (d *Decoder) Token() (t Token, err error) { +func (d *Decoder) Token() (Token, error) { + var t Token + var err error if d.stk != nil && d.stk.kind == stkEOF { - err = io.EOF - return + return nil, io.EOF } if d.nextToken != nil { t = d.nextToken d.nextToken = nil } else if t, err = d.rawToken(); err != nil { - return + switch { + case err == io.EOF && d.t != nil: + err = nil + case err == io.EOF && d.stk != nil && d.stk.kind != stkEOF: + err = d.syntaxError("unexpected EOF") + } + return t, err } if !d.Strict { @@ -264,12 +312,12 @@ func (d *Decoder) Token() (t Token, err error) { // to the other attribute names, so process // the translations first. for _, a := range t1.Attr { - if a.Name.Space == "xmlns" { + if a.Name.Space == xmlnsPrefix { v, ok := d.ns[a.Name.Local] d.pushNs(a.Name.Local, v, ok) d.ns[a.Name.Local] = a.Value } - if a.Name.Space == "" && a.Name.Local == "xmlns" { + if a.Name.Space == "" && a.Name.Local == xmlnsPrefix { // Default space for untagged names v, ok := d.ns[""] d.pushNs("", v, ok) @@ -291,23 +339,27 @@ func (d *Decoder) Token() (t Token, err error) { } t = t1 } - return + return t, err } -const xmlURL = "http://www.w3.org/XML/1998/namespace" +const ( + xmlURL = "http://www.w3.org/XML/1998/namespace" + xmlnsPrefix = "xmlns" + xmlPrefix = "xml" +) // Apply name space translation to name n. // The default name space (for Space=="") // applies only to element names, not to attribute names. func (d *Decoder) translate(n *Name, isElementName bool) { switch { - case n.Space == "xmlns": + case n.Space == xmlnsPrefix: return case n.Space == "" && !isElementName: return - case n.Space == "xml": + case n.Space == xmlPrefix: n.Space = xmlURL - case n.Space == "" && n.Local == "xmlns": + case n.Space == "" && n.Local == xmlnsPrefix: return } if v, ok := d.ns[n.Space]; ok { @@ -330,7 +382,7 @@ func (d *Decoder) switchToReader(r io.Reader) { } // Parsing state - stack holds old name space translations -// and the current set of open elements. The translations to pop when +// and the current set of open elements. The translations to pop when // ending a given tag are *below* it on the stack, which is // more work but forced on us by XML. 
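// Illustrative sketch of the TokenReader / NewTokenDecoder plumbing added in
// the hunks above, using the standard encoding/xml API that this vendored
// fork tracks. The commentStripper type is hypothetical: it filters Comment
// tokens out of an underlying token stream before decoding.
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type commentStripper struct{ tr xml.TokenReader }

func (c commentStripper) Token() (xml.Token, error) {
	for {
		tok, err := c.tr.Token()
		if err != nil {
			return tok, err
		}
		if _, isComment := tok.(xml.Comment); isComment {
			continue // drop comments, pass everything else through
		}
		return tok, nil
	}
}

func main() {
	src := `<note><!-- internal --><body>hello</body></note>`
	d := xml.NewTokenDecoder(commentStripper{tr: xml.NewDecoder(strings.NewReader(src))})

	var v struct {
		Body string `xml:"body"`
	}
	if err := d.Decode(&v); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(v.Body) // hello
}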
type stack struct { @@ -501,6 +553,9 @@ func (d *Decoder) RawToken() (Token, error) { } func (d *Decoder) rawToken() (Token, error) { + if d.t != nil { + return d.t.Token() + } if d.err != nil { return nil, d.err } @@ -552,7 +607,6 @@ func (d *Decoder) rawToken() (Token, error) { case '?': // if target == "xml" { - enc := procInstEncoding(string(data)) - if enc != "" && enc != "utf-8" && enc != "UTF-8" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" && !strings.EqualFold(enc, "utf-8") { if d.CharsetReader == nil { d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) return nil, d.err @@ -619,7 +679,12 @@ func (d *Decoder) rawToken() (Token, error) { return nil, d.err } d.buf.WriteByte(b) - if b0 == '-' && b1 == '-' && b == '>' { + if b0 == '-' && b1 == '-' { + if b != '>' { + d.err = d.syntaxError( + `invalid sequence "--" not allowed in comments`) + return nil, d.err + } break } b0, b1 = b1, b @@ -726,7 +791,7 @@ func (d *Decoder) rawToken() (Token, error) { return nil, d.err } - attr = make([]Attr, 0, 4) + attr = []Attr{} for { d.space() if b, ok = d.mustgetc(); !ok { @@ -748,14 +813,7 @@ func (d *Decoder) rawToken() (Token, error) { } d.ungetc(b) - n := len(attr) - if n >= cap(attr) { - nattr := make([]Attr, n, 2*cap(attr)) - copy(nattr, attr) - attr = nattr - } - attr = attr[0 : n+1] - a := &attr[n] + a := Attr{} if a.Name, ok = d.nsname(); !ok { if d.err == nil { d.err = d.syntaxError("expected attribute name in element") @@ -770,10 +828,9 @@ func (d *Decoder) rawToken() (Token, error) { if d.Strict { d.err = d.syntaxError("attribute name without = in element") return nil, d.err - } else { - d.ungetc(b) - a.Value = a.Name.Local } + d.ungetc(b) + a.Value = a.Name.Local } else { d.space() data := d.attrval() @@ -782,6 +839,7 @@ func (d *Decoder) rawToken() (Token, error) { } a.Value = string(data) } + attr = append(attr, a) } if empty { d.needClose = true @@ -812,7 +870,7 @@ func (d *Decoder) attrval() []byte { if !ok { return nil } - // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 + // https://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' { d.buf.WriteByte(b) @@ -863,9 +921,17 @@ func (d *Decoder) getc() (b byte, ok bool) { if b == '\n' { d.line++ } + d.offset++ return b, true } +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token. +func (d *Decoder) InputOffset() int64 { + return d.offset +} + // Return saved offset. // If we did ungetc (nextByte >= 0), have to back up one. 
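// Illustrative sketch of the InputOffset accessor added above, using the
// standard encoding/xml API that this vendored fork tracks: the offset marks
// the end of the token just returned and the start of the next one.
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<a><b>x</b></a>`))
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("token error:", err)
			return
		}
		fmt.Printf("offset %2d after %T\n", d.InputOffset(), tok)
	}
}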
func (d *Decoder) savedOffset() int { @@ -895,9 +961,10 @@ func (d *Decoder) ungetc(b byte) { d.line-- } d.nextByte = int(b) + d.offset-- } -var entity = map[string]int{ +var entity = map[string]rune{ "lt": '<', "gt": '>', "amp": '&', @@ -992,7 +1059,7 @@ Input: d.buf.WriteByte(';') n, err := strconv.ParseUint(s, base, 64) if err == nil && n <= unicode.MaxRune { - text = string(n) + text = string(rune(n)) haveText = true } } @@ -1002,7 +1069,6 @@ Input: if d.err != nil { return nil } - ok = false } if b, ok = d.mustgetc(); !ok { return nil @@ -1075,13 +1141,13 @@ Input: } // Decide whether the given rune is in the XML Character Range, per -// the Char production of http://www.xml.com/axml/testaxml.htm, +// the Char production of https://www.xml.com/axml/testaxml.htm, // Section 2.2 Characters. func isInCharacterRange(r rune) (inrange bool) { return r == 0x09 || r == 0x0A || r == 0x0D || - r >= 0x20 && r <= 0xDF77 || + r >= 0x20 && r <= 0xD7FF || r >= 0xE000 && r <= 0xFFFD || r >= 0x10000 && r <= 0x10FFFF } @@ -1113,12 +1179,12 @@ func (d *Decoder) name() (s string, ok bool) { } // Now we check the characters. - s = d.buf.String() - if !isName([]byte(s)) { - d.err = d.syntaxError("invalid XML name: " + s) + b := d.buf.Bytes() + if !isName(b) { + d.err = d.syntaxError("invalid XML name: " + string(b)) return "", false } - return s, true + return string(b), true } // Read a name and append its bytes to d.buf. @@ -1204,8 +1270,8 @@ func isNameString(s string) bool { } // These tables were generated by cut and paste from Appendix B of -// the XML spec at http://www.xml.com/axml/testaxml.htm -// and then reformatting. First corresponds to (Letter | '_' | ':') +// the XML spec at https://www.xml.com/axml/testaxml.htm +// and then reformatting. First corresponds to (Letter | '_' | ':') // and second corresponds to NameChar. var first = &unicode.RangeTable{ @@ -1522,7 +1588,9 @@ var second = &unicode.RangeTable{ // HTMLEntity is an entity map containing translations for the // standard HTML entity characters. -var HTMLEntity = htmlEntity +// +// See the Decoder.Strict and Decoder.Entity fields' documentation. +var HTMLEntity map[string]string = htmlEntity var htmlEntity = map[string]string{ /* @@ -1789,7 +1857,9 @@ var htmlEntity = map[string]string{ // HTMLAutoClose is the set of HTML elements that // should be considered to close automatically. -var HTMLAutoClose = htmlAutoClose +// +// See the Decoder.Strict and Decoder.Entity fields' documentation. +var HTMLAutoClose []string = htmlAutoClose var htmlAutoClose = []string{ /* @@ -1812,20 +1882,27 @@ var htmlAutoClose = []string{ } var ( - esc_quot = []byte(""") // shorter than """ - esc_apos = []byte("'") // shorter than "'" - esc_amp = []byte("&") - esc_lt = []byte("<") - esc_gt = []byte(">") - esc_tab = []byte(" ") - esc_nl = []byte(" ") - esc_cr = []byte(" ") - esc_fffd = []byte("\uFFFD") // Unicode replacement character + escQuot = []byte(""") // shorter than """ + escApos = []byte("'") // shorter than "'" + escAmp = []byte("&") + escLT = []byte("<") + escGT = []byte(">") + escTab = []byte(" ") + escNL = []byte(" ") + escCR = []byte(" ") + escFFFD = []byte("\uFFFD") // Unicode replacement character ) // EscapeText writes to w the properly escaped XML equivalent // of the plain text data s. func EscapeText(w io.Writer, s []byte) error { + return escapeText(w, s, true) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. If escapeNewline is true, newline +// characters will be escaped. 
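// Illustrative sketch of EscapeText, which the surrounding hunks reduce to a
// thin wrapper over an internal escapeText with a newline-escaping switch;
// written against the standard encoding/xml API that this vendored fork
// tracks.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	if err := xml.EscapeText(&buf, []byte(`a < b & "c"`)); err != nil {
		fmt.Println("escape error:", err)
		return
	}
	fmt.Println(buf.String()) // a &lt; b &amp; &#34;c&#34;
}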
+func escapeText(w io.Writer, s []byte, escapeNewline bool) error { var esc []byte last := 0 for i := 0; i < len(s); { @@ -1833,24 +1910,27 @@ func EscapeText(w io.Writer, s []byte) error { i += width switch r { case '"': - esc = esc_quot + esc = escQuot case '\'': - esc = esc_apos + esc = escApos case '&': - esc = esc_amp + esc = escAmp case '<': - esc = esc_lt + esc = escLT case '>': - esc = esc_gt + esc = escGT case '\t': - esc = esc_tab + esc = escTab case '\n': - esc = esc_nl + if !escapeNewline { + continue + } + esc = escNL case '\r': - esc = esc_cr + esc = escCR default: if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = esc_fffd + esc = escFFFD break } continue @@ -1863,10 +1943,8 @@ func EscapeText(w io.Writer, s []byte) error { } last = i } - if _, err := w.Write(s[last:]); err != nil { - return err - } - return nil + _, err := w.Write(s[last:]) + return err } // EscapeString writes to p the properly escaped XML equivalent @@ -1879,24 +1957,24 @@ func (p *printer) EscapeString(s string) { i += width switch r { case '"': - esc = esc_quot + esc = escQuot case '\'': - esc = esc_apos + esc = escApos case '&': - esc = esc_amp + esc = escAmp case '<': - esc = esc_lt + esc = escLT case '>': - esc = esc_gt + esc = escGT case '\t': - esc = esc_tab + esc = escTab case '\n': - esc = esc_nl + esc = escNL case '\r': - esc = esc_cr + esc = escCR default: if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = esc_fffd + esc = escFFFD break } continue @@ -1915,16 +1993,55 @@ func Escape(w io.Writer, s []byte) { EscapeText(w, s) } -// procInstEncoding parses the `encoding="..."` or `encoding='...'` +var ( + cdataStart = []byte("") + cdataEscape = []byte("]]]]>") +) + +// emitCDATA writes to w the CDATA-wrapped plain text data s. +// It escapes CDATA directives nested in s. +func emitCDATA(w io.Writer, s []byte) error { + if len(s) == 0 { + return nil + } + if _, err := w.Write(cdataStart); err != nil { + return err + } + for { + i := bytes.Index(s, cdataEnd) + if i >= 0 && i+len(cdataEnd) <= len(s) { + // Found a nested CDATA directive end. + if _, err := w.Write(s[:i]); err != nil { + return err + } + if _, err := w.Write(cdataEscape); err != nil { + return err + } + i += len(cdataEnd) + } else { + if _, err := w.Write(s); err != nil { + return err + } + break + } + s = s[i:] + } + _, err := w.Write(cdataEnd) + return err +} + +// procInst parses the `param="..."` or `param='...'` // value out of the provided string, returning "" if not found. -func procInstEncoding(s string) string { +func procInst(param, s string) string { // TODO: this parsing is somewhat lame and not exact. // It works for all actual cases, though. - idx := strings.Index(s, "encoding=") + param = param + "=" + idx := strings.Index(s, param) if idx == -1 { return "" } - v := s[idx+len("encoding="):] + v := s[idx+len(param):] if v == "" { return "" } diff --git a/cluster-autoscaler/vendor/golang.org/x/sync/singleflight/singleflight.go b/cluster-autoscaler/vendor/golang.org/x/sync/singleflight/singleflight.go index 690eb8501347..8473fb7922c1 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sync/singleflight/singleflight.go +++ b/cluster-autoscaler/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -52,10 +52,6 @@ type call struct { val interface{} err error - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. 
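// Illustrative sketch of typical singleflight usage, for context around the
// surrounding hunks, which drop the per-call forgotten flag and instead test
// map identity (g.m[key] == c) before deleting the key. The slowLookup
// function is hypothetical.
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

func slowLookup(key string) (string, error) {
	time.Sleep(10 * time.Millisecond) // stand-in for an expensive backend call
	return "value-for-" + key, nil
}

func main() {
	// Concurrent callers using the same key share a single in-flight call.
	v, err, shared := group.Do("config", func() (interface{}, error) {
		return slowLookup("config")
	})
	fmt.Println(v, err, shared)

	// Forget drops the key so the next Do starts a fresh call even if an
	// earlier call for the same key is still running.
	group.Forget("config")
}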
- forgotten bool - // These fields are read and written with the singleflight // mutex held before the WaitGroup is done, and are read but // not written after the WaitGroup is done. @@ -148,10 +144,10 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { c.err = errGoexit } - c.wg.Done() g.mu.Lock() defer g.mu.Unlock() - if !c.forgotten { + c.wg.Done() + if g.m[key] == c { delete(g.m, key) } @@ -204,9 +200,6 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { // an earlier call to complete. func (g *Group) Forget(key string) { g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } delete(g.m, key) g.mu.Unlock() } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/attributes/attributes.go b/cluster-autoscaler/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14e0..02f5dc531891 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go b/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go index 542594f5cc51..29475e31c979 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go index 25713908072c..392b21fb2d8e 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go @@ -110,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -244,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -371,3 +376,21 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. 
The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go index e8dfc828aaac..3929c26d31e1 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go index a87b6809af38..c33413581091 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct { // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. // // Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { @@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numIdle += updateVal } } + return cse.CurrentState() +} +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { // Evaluate. 
if cse.numReady > 0 { return connectivity.Ready diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f85802..f7031ad2251b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index b1c23eaae0db..0359956d36fa 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,17 +19,20 @@ package grpc import ( + "context" "fmt" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() @@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. 
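// Illustrative standalone sketch of the lock-free round-robin selection the
// roundrobin hunk above switches to: an atomic counter replaces the mutex and
// index. The rr and pickNext names are hypothetical.
package main

import (
	"fmt"
	"sync/atomic"
)

type rr struct {
	next  uint32
	items []string
}

func (p *rr) pickNext() string {
	// AddUint32 returns the incremented value; the modulo keeps the index in
	// range even after the counter wraps around.
	i := atomic.AddUint32(&p.next, 1)
	return p.items[i%uint32(len(p.items))]
}

func main() {
	p := &rr{items: []string{"a", "b", "c"}}
	for i := 0; i < 5; i++ {
		fmt.Println(p.pickNext())
	}
}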
If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/cluster-autoscaler/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf34..64a232f28111 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -261,6 +261,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +695,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. 
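// Illustrative sketch of a minimal balancer.ProducerBuilder against the
// GetOrBuildProducer API added in the hunks above; it assumes the gRPC
// version introduced by this update. The reportProducer and reportBuilder
// names are hypothetical, and a real producer would normally start work on
// the provided ClientConnInterface.
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

type reportProducer struct{ stopped bool } // Producer has no required methods

type reportBuilder struct{}

// Build returns the producer plus the close function that runs once every
// reference obtained via GetOrBuildProducer has been released.
func (reportBuilder) Build(cci interface{}) (balancer.Producer, func()) {
	p := &reportProducer{}
	return p, func() { p.stopped = true }
}

var _ balancer.ProducerBuilder = reportBuilder{}

func main() {
	p, closeFn := reportBuilder{}.Build(nil)
	fmt.Printf("built %T\n", p)
	closeFn()
}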
// Practically speaking it will only be visible on server side because diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/channelz/channelz.go b/cluster-autoscaler/vendor/google.golang.org/grpc/channelz/channelz.go index a220c47c59a5..32b7fa5794e1 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/channelz/channelz.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/channelz/channelz.go @@ -23,7 +23,7 @@ // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by // the `internal/channelz` package. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go b/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go index 779b03bca1c3..422639c79dbf 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go @@ -503,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -831,9 +831,9 @@ func equalAddresses(a, b []resolver.Address) bool { // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -998,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1228,38 +1228,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. 
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - defer hcancel() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. return } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + }) onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1271,7 +1266,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. hcancel() @@ -1279,60 +1274,34 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. 
- newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. - select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
+ return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1583,7 +1552,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1604,9 +1573,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e754..5feac3aa0e41 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. 
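// Illustrative sketch of a credentials.PerRPCCredentials implementation
// matching the interface documented above; the tokenCreds type and the
// authorization header value are hypothetical.
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

type tokenCreds struct{ token string }

// GetRequestMetadata attaches a bearer token to every outgoing RPC.
func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity reports that these credentials must only be sent
// over a secure transport.
func (c tokenCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = tokenCreds{}

func main() {
	md, _ := tokenCreds{token: "example"}.GetRequestMetadata(context.Background())
	fmt.Println(md["authorization"])
}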
diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/tls.go b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/tls.go index 784822d0560a..ce2bbc10a142 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/tls.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/tls.go @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/dialoptions.go b/cluster-autoscaler/vendor/google.golang.org/grpc/dialoptions.go index 60403bc160ec..9372dc322e80 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/dialoptions.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/dialoptions.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -36,12 +37,13 @@ import ( ) func init() { - internal.AddExtraDialOptions = func(opt ...DialOption) { + internal.AddGlobalDialOptions = func(opt ...DialOption) { extraDialOptions = append(extraDialOptions, opt...) } - internal.ClearExtraDialOptions = func() { + internal.ClearGlobalDialOptions = func() { extraDialOptions = nil } + internal.WithBinaryLogger = withBinaryLogger } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -61,6 +63,7 @@ type dialOptions struct { timeout time.Duration scChan <-chan ServiceConfig authority string + binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption channelzParentID *channelz.Identifier @@ -401,6 +404,14 @@ func WithStatsHandler(h stats.Handler) DialOption { }) } +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + // FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on // non-temporary dial errors. If f is true, and dialer returns a non-temporary // error, gRPC will fail the connection to the network address and won't try to diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go index 18e530fc9024..711763d54fb7 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. 
@@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/gzip/gzip.go index ce2f15ed288f..ca820bd47d44 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f66409034..b5560b47ec4b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/cluster-autoscaler/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 69f525d1baeb..a332dfd7b54e 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index e3dfe204f9ae..809d73ccafb0 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -37,7 +37,7 @@ type Logger interface { // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). 
// -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") @@ -56,11 +56,11 @@ func GetLogger() Logger { return binLogger } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { @@ -117,7 +117,7 @@ func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. +// New MethodLogger with same service overrides the old one. func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) @@ -131,7 +131,7 @@ func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. +// New MethodLogger with same method overrides the old one. func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) @@ -161,11 +161,11 @@ func (l *logger) setBlacklist(method string) error { return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) @@ -174,16 +174,16 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger { return nil } if ml, ok := l.config.Methods[s+"/"+m]; ok { - return newMethodLogger(ml.Header, ml.Message) + return NewTruncatingMethodLogger(ml.Header, ml.Message) } if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } if ml, ok := l.config.Services[s]; ok { - return newMethodLogger(ml.Header, ml.Message) + return NewTruncatingMethodLogger(ml.Header, ml.Message) } if l.config.All == nil { return nil } - return newMethodLogger(l.config.All.Header, l.config.All.Message) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index ab589a76bf96..f9e80e27ab68 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. 
-// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 24df0a1a0c4e..179f4a26d135 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -52,7 +52,9 @@ type MethodLogger interface { Log(LogEntryConfig) } -type methodLogger struct { +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -61,8 +63,9 @@ type methodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *methodLogger { - return &methodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -75,8 +78,8 @@ func newMethodLogger(h, m uint64) *methodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic -// in methodLogger as possible. -func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -95,11 +98,11 @@ func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *methodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -129,7 +132,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go index ad0ce4dabf06..7b2f350e2e64 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f0272543110..7edd196bd3da 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,15 @@ import ( ) const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" ) var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000000..821dd0a7c198 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/xds.go index a83b26bb869c..af09711a3e88 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -83,9 +84,9 @@ var ( // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = false + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. 
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258fc0..b68e26a36493 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b766..517ea70642a1 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 000000000000..6635f7bca96d --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000000..9f4090967980 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/method.go index e9c4af64830c..ec62b4775e5b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go index 83018be7c718..fd0ee3dcaf1e 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go @@ -63,20 +63,30 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) - // AddExtraServerOptions adds an array of ServerOption that will be + // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddExtraServerOptions interface{} // func(opt ...ServerOption) - // ClearExtraServerOptions clears the array of extra ServerOption. This + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. - ClearExtraServerOptions func() - // AddExtraDialOptions adds an array of DialOption that will be effective + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddExtraDialOptions interface{} // func(opt ...DialOption) - // ClearExtraDialOptions clears the array of extra DialOption. This + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. - ClearExtraDialOptions func() + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. 
+ JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -117,22 +127,6 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() - - // RegisterOutlierDetectionBalancerForTesting registers the Outlier - // Detection Balancer for testing purposes, regardless of the Outlier - // Detection environment variable. - // - // TODO: Remove this function once the Outlier Detection env var is removed. - RegisterOutlierDetectionBalancerForTesting func() - - // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier - // Detection Balancer for testing purposes. This is needed because there is - // no way to unregister the Outlier Detection Balancer after registering it - // solely for testing purposes using - // RegisterOutlierDetectionBalancerForTesting(). - // - // TODO: Remove this function once the Outlier Detection env var is removed. - UnregisterOutlierDetectionBalancerForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 20852e59df29..7f1a702cacbe 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597f3..51e733e495a3 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. 
+// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/status/status.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd13..b0ead4f54f82 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/status/status.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 244f4b081d52..409769f48fdc 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -886,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 090120925bb4..fb272235d817 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 28c77af70aba..d518b07e16f7 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -99,16 +101,13 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables @@ -194,7 +193,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -216,12 +215,35 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. 
+ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if connectCtx.Err() != nil { + // connectCtx expired before exiting the function. Hard close the connection. + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -253,15 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -299,6 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -315,17 +330,18 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts kp: kp, statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -361,21 +377,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. 
n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -395,14 +422,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -469,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -505,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -587,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -616,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -632,13 +681,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error @@ -878,19 +927,15 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. t.onClose() t.state = closing streams := t.activeStreams @@ -1230,18 +1275,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. 
- atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } @@ -1469,33 +1525,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 28bcba0a33c6..3dd15647bc84 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -265,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -485,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -1416,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go index 56e95788d9cb..2c601a864d99 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -45,7 +44,7 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 ) @@ -251,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -266,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
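The bytes.Buffer to strings.Builder switch above (and in decodeGrpcMessageUnchecked in the next hunk) keeps the same percent-encoding behavior while avoiding the extra copy that Buffer.String() makes. A standalone sketch of the idiom, not code from the patch:

package main

import (
	"fmt"
	"strings"
)

// percentEncode mirrors the shape of encodeGrpcMessageUnchecked: printable
// ASCII (except '%') passes through, every other byte is %XX-escaped.
func percentEncode(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		b := msg[i]
		if b >= ' ' && b <= '~' && b != '%' {
			sb.WriteByte(b)
		} else {
			fmt.Fprintf(&sb, "%%%02X", b)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(percentEncode("café")) // caf%C3%A9
}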
@@ -291,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go index 6c3ba8515940..2e615ee20cc5 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -573,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go b/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go index 8e0f6abe89d7..fb4a88f59bd3 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. 
// // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,7 +76,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,19 +184,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - s := make([]string, len(v)) - copy(s, v) - out[key] = s + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -222,15 +256,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. 
key := strings.ToLower(k) - s := make([]string, len(v)) - copy(s, v) - out[key] = s + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go b/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go index 843633c910a1..a5d5516ee060 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -129,8 +130,12 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/preloader.go b/cluster-autoscaler/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad916..cd45547854f0 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/preloader.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go index ca2e35a3596f..967cbc7373ab 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go @@ -96,7 +96,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -236,12 +236,12 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. 
Scheme string diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go b/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b0e..934fc1aa015e 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
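The call options documented above compose per RPC. A rough usage sketch under stated assumptions (helloClient and the message types are placeholder stand-ins for a generated stub, not part of the patch): selecting the registered gzip compressor, whose name this bump now also advertises in grpc-accept-encoding, and capping the response size.

package example // illustrative only

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // importing registers the gzip compressor
)

// Placeholder stand-ins for a generated client stub.
type HelloRequest struct{}
type HelloReply struct{}

type helloClient interface {
	SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
}

func callWithOptions(ctx context.Context, c helloClient) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	_, err := c.SayHello(ctx, &HelloRequest{},
		// Compress the request with the registered gzip compressor; its name
		// is also sent in grpc-accept-encoding unless advertising is disabled
		// via GRPC_GO_ADVERTISE_COMPRESSORS=false.
		grpc.UseCompressor(gzip.Name),
		// Limit how large a response this client will accept.
		grpc.MaxCallRecvMsgSize(4*1024*1024),
	)
	return err
}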
@@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +548,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/server.go index 2ad9da7bfccf..f4dde72b41f8 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/server.go @@ -73,12 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } - internal.AddExtraServerOptions = func(opt ...ServerOption) { - extraServerOptions = opt + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) 
} - internal.ClearExtraServerOptions = func() { + internal.ClearGlobalServerOptions = func() { extraServerOptions = nil } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -155,6 +157,7 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle statsHandlers []stats.Handler maxConcurrentStreams uint32 @@ -214,6 +217,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. @@ -452,6 +471,14 @@ func StatsHandler(h stats.Handler) ServerOption { }) } +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. The provided method is a bidi-streaming RPC service // handler that will be invoked instead of returning the "unimplemented" gRPC @@ -1199,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1221,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1261,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if len(shs) != 0 || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1287,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1314,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1351,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1382,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1499,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1519,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. 
Otherwise, try @@ -1585,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? return appErr @@ -1600,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/cluster-autoscaler/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f926613e..35e7a20a04ba 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go b/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go index 6d163b6e3842..623be39f26ba 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. 
func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go b/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go index 446a91e323ee..960c3e33dfd3 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go @@ -39,6 +39,7 @@ import ( imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -301,7 +309,14 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } // Pick the transport to use and create a new stream on the transport. // Assign cs.attempt upon success. @@ -322,7 +337,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -336,7 +351,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -480,7 +497,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -735,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -754,8 +779,10 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } return m, nil } @@ -829,38 +856,44 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, Message: data, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -873,7 +906,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -894,10 +929,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -930,10 +968,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -1005,6 +1046,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -1453,7 +1495,7 @@ type serverStream struct { statsHandler []stats.Handler - binlog binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. 
It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1487,12 +1529,15 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } err = ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1549,17 +1594,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { @@ -1598,13 +1649,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1625,10 +1679,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { }) } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/tap/tap.go b/cluster-autoscaler/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5f5..bfa5dfa40e4d 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/tap/tap.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/version.go b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go index 8934f06bc065..2198e7098d7d 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/version.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.49.0" +const Version = "1.51.0" diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh b/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh index c3fc8253b13a..bd8e0cdb33c8 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh @@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" diff --git a/cluster-autoscaler/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go index f81594c9123a..1395a7e107bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 13067ad80d58..82598ed57309 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_AdmissionRequest = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c3c5a..958636f70060 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_MutatingWebhook = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index a670bb206da6..dc7df4b15db0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_MatchResources = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index c57c5b7fa8c8..1a6c77e4a890 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_MutatingWebhook = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 6de93420069e..3b75fa65bc33 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_ServerStorageVersion = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 509bb11c50fd..920e56a5294b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 00d6d1825291..7c963f640dc8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 1936a2467262..9caf202230b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 5d37ac1f8dca..b1a730b816ee 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_BoundObjectReference = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go index bc17c5f30dbf..e2726d9d8001 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_SelfSubjectReview = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c3a87..9c97b2a2eab2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_TokenReview = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7ad64..93229485cc00 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189eb20..e0846be7a4d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_LocalSubjectAccessReview = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index ca288e912317..47179894edd2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 41ab32a4c781..204cfd395bed 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 6f555487dca8..ad1939191958 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index cb92e9e34537..c1d5bbe6a080 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_ContainerResourceMetricSource = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto index 74ccac921f07..a9c9a37d5f93 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto @@ -83,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -189,7 +190,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -256,7 +257,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -281,7 +282,7 @@ message JobSpec { // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -334,7 +335,7 @@ message JobStatus { // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -344,15 +345,16 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. 
// // Old jobs might not be tracked using this field, in which case the field @@ -409,6 +411,7 @@ message PodFailurePolicyOnExitCodesRequirement { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -442,10 +445,11 @@ message PodFailurePolicyOnPodConditionsPattern { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. message PodFailurePolicyRule { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -470,12 +474,12 @@ message PodFailurePolicyRule { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional repeated string failed = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go index dcb15728f994..169d70322779 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go @@ -135,6 +135,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -168,10 +169,11 @@ type PodFailurePolicyOnPodConditionsPattern struct { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. type PodFailurePolicyRule struct { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -216,7 +218,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. 
Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -288,7 +290,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -313,7 +315,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -366,7 +368,7 @@ type JobStatus struct { // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -376,15 +378,16 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -403,12 +406,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. 
// +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -524,6 +527,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 89470dcc6771..3d938d87c44d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -53,7 +53,7 @@ var map_CronJobSpec = map[string]string{ "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. 
Defaults to 3.", @@ -113,7 +113,7 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the `JobPodFailurePolicy` feature gate (disabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", @@ -121,8 +121,8 @@ var map_JobSpec = map[string]string{ "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). 
If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. 
Defaults to false.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -137,8 +137,8 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -168,7 +168,7 @@ func (PodFailurePolicy) SwaggerDoc() map[string]string { var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. 
When specified, it should match one the container or initContainer names in the pod template.", - "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", "values": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", } @@ -187,8 +187,8 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { } var map_PodFailurePolicyRule = map[string]string{ - "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of OnExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. 
Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } @@ -199,8 +199,8 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.proto index d8386a8f51dc..d19647c41202 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -84,6 +84,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types.go index 4c0d69dd6b0e..9a3fff5840bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types.go @@ -124,6 +124,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 5716bbb862af..63b009d1d68f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_CronJob = map[string]string{ @@ -53,7 +53,7 @@ var map_CronJobSpec = map[string]string{ "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types.go index af5efb5165b8..92b2018e761a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types.go @@ -274,8 +274,9 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // +enum diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 0dc8a4c69b31..4bdf39ebb3e4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.proto index e246fba021c4..f70f01ef7ab5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -124,8 +124,10 @@ message CertificateSigningRequestSpec { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go index fe7aab9704d0..7e5a5c198a61 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -229,8 +231,9 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index d3f318150cbe..f9ab1f13de97 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. 
Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/generated.proto index b1efb737f0c4..36fce60f2de7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ace171..b0e1d0682961 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types.go @@ -30,7 +30,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -43,7 +43,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -69,6 +69,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f14404308c7..f3720eca0271 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 85faa3b09b44..92c8918b801c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca8548..3a3d5f32e214 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -33,7 +33,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -46,7 +46,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -75,6 +75,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265d4c5..78ca4e393fc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1dd97..8f99648587c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go index a8df2b222e93..c7cc7ee78c6a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go @@ -6321,915 +6321,915 @@ func init() { var fileDescriptor_83c10c24ec417dc9 = []byte{ // 14547 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x69, 0x8c, 0x24, 0xd7, - 0x79, 0x98, 0xaa, 0x7b, 0xae, 0xfe, 0xe6, 0x7e, 0xb3, 0xbb, 0x9c, 0x1d, 0x72, 0x77, 0x96, 0x45, - 0x72, 0xb9, 0x14, 0xc9, 0x19, 0x2d, 0x0f, 0x89, 0x26, 0x25, 0x5a, 0x73, 0xee, 0x36, 0x77, 0x67, - 0xb6, 0xf9, 0x7a, 0x76, 0x57, 0x07, 0x25, 0xa8, 0xa6, 0xfb, 0xcd, 0x4c, 0x69, 0xba, 0xab, 0x9a, - 0x55, 0xd5, 0xb3, 0x3b, 0x8c, 0x84, 0x38, 0xf2, 0x29, 0xdb, 0x09, 0x84, 0xc0, 0x39, 0x20, 0x1b, - 0x46, 0xe0, 0x38, 0xb6, 0x15, 0xe5, 0x52, 0xe4, 0xd8, 0x8e, 0xe5, 0xd8, 0xce, 0xed, 0x04, 0x81, - 0xe3, 0x18, 0x88, 0x65, 0xc0, 0xc8, 0xc4, 0x5e, 0x07, 0x30, 0x04, 0x24, 0xb6, 0x73, 0x01, 0xc9, - 0xc4, 0x89, 0x83, 0x77, 0xd6, 0x7b, 0x75, 0x74, 0xf7, 0x2c, 0x67, 0x47, 0x94, 0xc0, 0x7f, 0xdd, - 0xdf, 0xf7, 0xbd, 0xef, 0xbd, 0x7a, 0xe7, 0xf7, 0xbe, 0xef, 0x7b, 0xdf, 0x07, 0xaf, 0xec, 0xbe, - 0x14, 0xce, 0xb9, 0xfe, 0xfc, 0x6e, 0x7b, 0x93, 0x04, 0x1e, 0x89, 0x48, 0x38, 0xbf, 0x47, 0xbc, - 0xba, 0x1f, 0xcc, 0x0b, 0x84, 0xd3, 0x72, 0xe7, 0x6b, 0x7e, 0x40, 0xe6, 0xf7, 0x2e, 0xcf, 0x6f, - 0x13, 0x8f, 0x04, 0x4e, 0x44, 0xea, 0x73, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xcd, 0x9c, 0xd3, - 0x72, 0xe7, 0x28, 0xcd, 0xdc, 0xde, 0xe5, 0x99, 0x67, 0xb7, 0xdd, 0x68, 0xa7, 0xbd, 0x39, 0x57, - 0xf3, 0x9b, 0xf3, 0xdb, 0xfe, 0xb6, 0x3f, 0xcf, 0x48, 0x37, 0xdb, 0x5b, 0xec, 0x1f, 0xfb, 0xc3, - 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0x8e, 0xeb, 0x91, 0x60, 0x7f, 0xbe, - 0xb5, 0xbb, 0xcd, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x83, 0x1a, 0x49, 0x56, 0xdc, 0xb1, 0x54, 0x38, - 0xdf, 0x24, 0x91, 0x93, 0xd1, 0xdc, 0x99, 0xf9, 0xbc, 0x52, 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, - 0x9a, 0xf7, 0x77, 0x2b, 0x10, 0xd6, 0x76, 0x48, 0xd3, 0x49, 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, - 0x72, 0x1b, 0xf3, 0xae, 0x17, 0x85, 0x51, 0x90, 0x2c, 0x64, 0x7f, 0xdd, 0x82, 0x0b, 0x0b, 0xb7, - 0xab, 0x2b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, 0xed, 0x56, 0x23, 0x3f, 0x20, 0xb7, - 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0xac, 0x23, 0xd0, 0x33, 0x30, 0xb4, 0xc7, 0xfe, 0x97, 0x97, 0xa7, - 0xad, 0x0b, 0xd6, 0xa5, 0xd2, 0xe2, 0xc4, 0xaf, 0x1d, 0xcc, 0xbe, 
0xe7, 0xde, 0xc1, 0xec, 0xd0, - 0x2d, 0x01, 0xc7, 0x8a, 0x02, 0x5d, 0x84, 0x81, 0xad, 0x70, 0x63, 0xbf, 0x45, 0xa6, 0x0b, 0x8c, - 0x76, 0x4c, 0xd0, 0x0e, 0xac, 0x56, 0x29, 0x14, 0x0b, 0x2c, 0x9a, 0x87, 0x52, 0xcb, 0x09, 0x22, - 0x37, 0x72, 0x7d, 0x6f, 0xba, 0x78, 0xc1, 0xba, 0xd4, 0xbf, 0x38, 0x29, 0x48, 0x4b, 0x15, 0x89, - 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x0d, 0xaf, 0xb1, 0x3f, 0xdd, 0x77, 0xc1, 0xba, - 0x34, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x17, 0x0b, 0x30, 0xb4, 0xb0, 0xb5, 0xe5, - 0x7a, 0x6e, 0xb4, 0x8f, 0x6e, 0xc1, 0x88, 0xe7, 0xd7, 0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xf8, 0xb9, - 0x0b, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d, 0x4e, 0xdc, 0x3b, 0x98, 0x1d, 0xd1, 0x21, - 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, 0x60, 0x6c, 0x67, 0xb3, 0xd8, 0x56, - 0x62, 0xb2, 0xc5, 0xf1, 0x7b, 0x07, 0xb3, 0xc3, 0x1a, 0x00, 0xeb, 0x4c, 0xd0, 0x26, 0x8c, 0xd3, - 0xbf, 0x5e, 0xe4, 0x2a, 0xbe, 0x45, 0xc6, 0xf7, 0xb1, 0x3c, 0xbe, 0x1a, 0xe9, 0xe2, 0xd4, 0xbd, - 0x83, 0xd9, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x6f, 0xc1, 0xd8, 0x42, 0x14, 0x39, 0xb5, 0x1d, - 0x52, 0xe7, 0x23, 0x88, 0x5e, 0x80, 0x3e, 0xcf, 0x69, 0x12, 0x31, 0xbe, 0x17, 0x44, 0xc7, 0xf6, - 0xad, 0x3b, 0x4d, 0x72, 0x78, 0x30, 0x3b, 0x71, 0xd3, 0x73, 0xdf, 0x6c, 0x8b, 0x59, 0x41, 0x61, - 0x98, 0x51, 0xa3, 0xe7, 0x00, 0xea, 0x64, 0xcf, 0xad, 0x91, 0x8a, 0x13, 0xed, 0x88, 0xf1, 0x46, - 0xa2, 0x2c, 0x2c, 0x2b, 0x0c, 0xd6, 0xa8, 0xec, 0xbb, 0x50, 0x5a, 0xd8, 0xf3, 0xdd, 0x7a, 0xc5, - 0xaf, 0x87, 0x68, 0x17, 0xc6, 0x5b, 0x01, 0xd9, 0x22, 0x81, 0x02, 0x4d, 0x5b, 0x17, 0x8a, 0x97, - 0x86, 0x9f, 0xbb, 0x94, 0xf9, 0xb1, 0x26, 0xe9, 0x8a, 0x17, 0x05, 0xfb, 0x8b, 0x0f, 0x89, 0xfa, - 0xc6, 0x13, 0x58, 0x9c, 0xe4, 0x6c, 0xff, 0xb3, 0x02, 0x9c, 0x5e, 0x78, 0xab, 0x1d, 0x90, 0x65, - 0x37, 0xdc, 0x4d, 0xce, 0xf0, 0xba, 0x1b, 0xee, 0xae, 0xc7, 0x3d, 0xa0, 0xa6, 0xd6, 0xb2, 0x80, - 0x63, 0x45, 0x81, 0x9e, 0x85, 0x41, 0xfa, 0xfb, 0x26, 0x2e, 0x8b, 0x4f, 0x9e, 0x12, 0xc4, 0xc3, - 0xcb, 0x4e, 0xe4, 0x2c, 0x73, 0x14, 0x96, 0x34, 0x68, 0x0d, 0x86, 0x6b, 0x6c, 0x41, 0x6e, 0xaf, - 0xf9, 0x75, 0xc2, 0x06, 0xb3, 0xb4, 0xf8, 0x34, 0x25, 0x5f, 0x8a, 0xc1, 0x87, 0x07, 0xb3, 0xd3, - 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, 0xd5, 0xc7, 0x38, 0x41, 0xc6, - 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x7e, 0xb6, 0x54, 0x46, 0xb2, 0x97, 0x09, 0xba, 0x0c, 0x7d, 0xbb, - 0xae, 0x57, 0x9f, 0x1e, 0x60, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xe6, 0x7a, 0xf5, 0xc3, 0x83, 0xd9, - 0x49, 0xa3, 0x39, 0x14, 0x88, 0x19, 0xa9, 0xfd, 0xdf, 0x2d, 0x98, 0x65, 0xb8, 0x55, 0xb7, 0x41, - 0x2a, 0x24, 0x08, 0xdd, 0x30, 0x22, 0x5e, 0x64, 0x74, 0xe8, 0x73, 0x00, 0x21, 0xa9, 0x05, 0x24, - 0xd2, 0xba, 0x54, 0x4d, 0x8c, 0xaa, 0xc2, 0x60, 0x8d, 0x8a, 0x6e, 0x08, 0xe1, 0x8e, 0x13, 0xb0, - 0xf9, 0x25, 0x3a, 0x56, 0x6d, 0x08, 0x55, 0x89, 0xc0, 0x31, 0x8d, 0xb1, 0x21, 0x14, 0xbb, 0x6d, - 0x08, 0xe8, 0x43, 0x30, 0x1e, 0x57, 0x16, 0xb6, 0x9c, 0x9a, 0xec, 0x40, 0xb6, 0x64, 0xaa, 0x26, - 0x0a, 0x27, 0x69, 0xed, 0xbf, 0x69, 0x89, 0xc9, 0x43, 0xbf, 0xfa, 0x1d, 0xfe, 0xad, 0xf6, 0x2f, - 0x58, 0x30, 0xb8, 0xe8, 0x7a, 0x75, 0xd7, 0xdb, 0x46, 0x9f, 0x82, 0x21, 0x7a, 0x36, 0xd5, 0x9d, - 0xc8, 0x11, 0xfb, 0xde, 0xfb, 0xb4, 0xb5, 0xa5, 0x8e, 0x8a, 0xb9, 0xd6, 0xee, 0x36, 0x05, 0x84, - 0x73, 0x94, 0x9a, 0xae, 0xb6, 0x1b, 0x9b, 0x9f, 0x26, 0xb5, 0x68, 0x8d, 0x44, 0x4e, 0xfc, 0x39, - 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x1a, 0x0c, 0x44, 0x4e, 0xb0, 0x4d, 0x22, 0xb1, 0x01, 0x66, 0x6e, - 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x35, 0x12, 0x1f, 0x0b, 0x1b, 0xac, 0x28, 0x16, 
0x2c, - 0xec, 0xff, 0x3b, 0x08, 0x67, 0x97, 0xaa, 0xe5, 0x9c, 0x79, 0x75, 0x11, 0x06, 0xea, 0x81, 0xbb, - 0x47, 0x02, 0xd1, 0xcf, 0x8a, 0xcb, 0x32, 0x83, 0x62, 0x81, 0x45, 0x2f, 0xc1, 0x08, 0x3f, 0x90, - 0xae, 0x3a, 0x5e, 0xbd, 0x21, 0xbb, 0xf8, 0x94, 0xa0, 0x1e, 0xb9, 0xa5, 0xe1, 0xb0, 0x41, 0x79, - 0xc4, 0x49, 0x75, 0x31, 0xb1, 0x18, 0xf3, 0x0e, 0xbb, 0xcf, 0x5b, 0x30, 0xc1, 0xab, 0x59, 0x88, - 0xa2, 0xc0, 0xdd, 0x6c, 0x47, 0x24, 0x9c, 0xee, 0x67, 0x3b, 0xdd, 0x52, 0x56, 0x6f, 0xe5, 0xf6, - 0xc0, 0xdc, 0xad, 0x04, 0x17, 0xbe, 0x09, 0x4e, 0x8b, 0x7a, 0x27, 0x92, 0x68, 0x9c, 0xaa, 0x16, - 0x7d, 0xb7, 0x05, 0x33, 0x35, 0xdf, 0x8b, 0x02, 0xbf, 0xd1, 0x20, 0x41, 0xa5, 0xbd, 0xd9, 0x70, - 0xc3, 0x1d, 0x3e, 0x4f, 0x31, 0xd9, 0x62, 0x3b, 0x41, 0xce, 0x18, 0x2a, 0x22, 0x31, 0x86, 0xe7, - 0xef, 0x1d, 0xcc, 0xce, 0x2c, 0xe5, 0xb2, 0xc2, 0x1d, 0xaa, 0x41, 0xbb, 0x80, 0xe8, 0x51, 0x5a, - 0x8d, 0x9c, 0x6d, 0x12, 0x57, 0x3e, 0xd8, 0x7b, 0xe5, 0x67, 0xee, 0x1d, 0xcc, 0xa2, 0xf5, 0x14, - 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x09, 0xa7, 0x28, 0x34, 0xf5, 0xad, 0x43, 0xbd, 0x57, 0x37, 0x7d, - 0xef, 0x60, 0xf6, 0xd4, 0x7a, 0x06, 0x13, 0x9c, 0xc9, 0x1a, 0x7d, 0x97, 0x05, 0x67, 0xe3, 0xcf, - 0x5f, 0xb9, 0xdb, 0x72, 0xbc, 0x7a, 0x5c, 0x71, 0xa9, 0xf7, 0x8a, 0xe9, 0x9e, 0x7c, 0x76, 0x29, - 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x45, 0x9b, 0x96, 0xac, 0x1b, 0x7a, 0xaf, 0xfb, 0xa1, - 0x7b, 0x07, 0xb3, 0x53, 0xeb, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x12, 0x9c, 0xce, 0x9c, 0x9d, - 0x68, 0x02, 0x8a, 0xbb, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x53, 0xd0, 0xbf, 0xe7, 0x34, - 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, 0x30, 0xbe, 0x54, - 0x2d, 0xdf, 0xd7, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e, 0x44, 0x8b, 0xb9, 0x87, - 0xe8, 0x9f, 0xcd, 0x58, 0xb2, 0x7d, 0x6c, 0xc9, 0x7e, 0x47, 0xce, 0x92, 0x3d, 0xe6, 0x85, 0xba, - 0x97, 0x33, 0x6b, 0xfb, 0xd9, 0x00, 0x66, 0x4a, 0x48, 0xd7, 0xfd, 0x9a, 0xd3, 0x48, 0x6e, 0xb5, - 0x47, 0x9c, 0xba, 0xc7, 0x33, 0x8e, 0x35, 0x18, 0x59, 0x72, 0x5a, 0xce, 0xa6, 0xdb, 0x70, 0x23, - 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0x8b, 0xa7, 0xef, 0x1d, 0xcc, - 0x16, 0x17, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x8f, 0x29, 0x05, 0x7a, 0x2f, 0xf4, 0xd5, 0x03, - 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x07, 0x7e, 0x2b, 0x41, 0xca, 0x68, 0xec, - 0x5f, 0x2d, 0xc0, 0x23, 0x4b, 0xa4, 0xb5, 0xb3, 0x5a, 0xcd, 0x39, 0x2f, 0x2e, 0xc1, 0x50, 0xd3, - 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, 0x86, 0x15, 0x16, 0x5d, 0x80, - 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, 0xda, 0x21, 0x09, - 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x43, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, 0x23, 0x88, 0x13, - 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, 0x7b, 0x5a, 0x9a, - 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2, 0xd7, 0x0a, - 0x80, 0x78, 0x17, 0x7e, 0x8b, 0x75, 0xdc, 0xcd, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0x8e, 0xab, 0xf7, - 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb9, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, 0xdc, 0x9d, 0x8f, - 0x26, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x18, 0xa6, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, 0xd9, 0xef, - 0xb8, 0x8f, 0xbd, 0x99, 0xfe, 0xd8, 0x63, 0x98, 0x16, 0xf6, 0xdf, 0xb5, 0x60, 0x78, 0xa9, 0xe1, - 0xb8, 0x4d, 0xf1, 0xa9, 0x4b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, 0x74, 0x73, 0x9b, - 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x59, 0x03, 0xb8, 0x41, 0x9a, 0xad, 0x86, 0x13, - 0xe9, 0xb7, 0x02, 
0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x0e, 0x63, 0x4b, 0x0d, - 0x97, 0x78, 0x51, 0xb9, 0xb2, 0xe4, 0x7b, 0x5b, 0xee, 0x36, 0x7a, 0x19, 0xc6, 0x22, 0xb7, 0x49, - 0xfc, 0x76, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xff, 0x22, 0xba, 0x77, 0x30, - 0x3b, 0xb6, 0x61, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x1d, 0x3a, 0xe2, 0x7e, 0xb3, 0xe5, 0x7b, 0xc4, - 0x8b, 0x96, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, 0x7f, 0xf9, 0x45, - 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0x1e, 0xcc, 0x9e, 0x49, 0x97, 0x60, 0x23, 0xcb, 0xca, 0xa0, 0xef, - 0x80, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3e, 0xf5, 0x51, 0x39, 0xfe, 0x55, 0x06, 0x3d, 0x3c, - 0x98, 0x1d, 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x29, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, - 0xe5, 0xf9, 0x3d, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x18, 0xf4, 0x93, 0x20, - 0xf0, 0x03, 0xb1, 0xab, 0x8c, 0x0a, 0xc2, 0xfe, 0x15, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x5b, 0x0b, - 0xc6, 0x55, 0x5b, 0x79, 0x5d, 0x27, 0x70, 0x6f, 0xfa, 0x18, 0x40, 0x4d, 0x7e, 0x60, 0xc8, 0xce, - 0xbb, 0xe1, 0xe7, 0x2e, 0x66, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, 0x39, 0x2b, 0x50, 0x88, 0x35, 0x6e, - 0xf6, 0x3f, 0xb2, 0x60, 0x2a, 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, 0xbe, 0x6a, 0xae, - 0xb7, 0xaf, 0xa2, 0xa5, 0xd9, 0x37, 0xa9, 0xc5, 0x27, 0x21, 0xda, 0x17, 0x5d, 0x85, 0x7e, 0x37, - 0x22, 0x4d, 0xf9, 0x31, 0x8f, 0x75, 0xfc, 0x18, 0xde, 0xaa, 0x78, 0x44, 0xca, 0xb4, 0x24, 0xe6, - 0x0c, 0xec, 0x5f, 0x2d, 0x42, 0x89, 0x4f, 0xdb, 0x35, 0xa7, 0x75, 0x02, 0x63, 0xf1, 0x34, 0x94, - 0xdc, 0x66, 0xb3, 0x1d, 0x39, 0x9b, 0xe2, 0x00, 0x1a, 0xe2, 0x9b, 0x41, 0x59, 0x02, 0x71, 0x8c, - 0x47, 0x65, 0xe8, 0x63, 0x4d, 0xe1, 0x5f, 0xf9, 0x64, 0xf6, 0x57, 0x8a, 0xb6, 0xcf, 0x2d, 0x3b, - 0x91, 0xc3, 0x65, 0x3f, 0x75, 0xf2, 0x51, 0x10, 0x66, 0x2c, 0x90, 0x03, 0xb0, 0xe9, 0x7a, 0x4e, - 0xb0, 0x4f, 0x61, 0xd3, 0x45, 0xc6, 0xf0, 0xd9, 0xce, 0x0c, 0x17, 0x15, 0x3d, 0x67, 0xab, 0x3e, - 0x2c, 0x46, 0x60, 0x8d, 0xe9, 0xcc, 0x07, 0xa0, 0xa4, 0x88, 0x8f, 0x22, 0xc2, 0xcd, 0x7c, 0x08, - 0xc6, 0x13, 0x75, 0x75, 0x2b, 0x3e, 0xa2, 0x4b, 0x80, 0xbf, 0xc8, 0xb6, 0x0c, 0xd1, 0xea, 0x15, - 0x6f, 0x4f, 0xec, 0x9c, 0x6f, 0xc1, 0xa9, 0x46, 0xc6, 0xde, 0x2b, 0xc6, 0xb5, 0xf7, 0xbd, 0xfa, - 0x11, 0xf1, 0xd9, 0xa7, 0xb2, 0xb0, 0x38, 0xb3, 0x0e, 0x2a, 0xd5, 0xf8, 0x2d, 0xba, 0x40, 0x9c, - 0x86, 0x7e, 0x41, 0xb8, 0x21, 0x60, 0x58, 0x61, 0xe9, 0x7e, 0x77, 0x4a, 0x35, 0xfe, 0x1a, 0xd9, - 0xaf, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x4d, 0x6d, 0xfe, 0x39, 0xde, 0xfb, 0x7c, 0xbb, 0x1c, - 0x16, 0x0c, 0x8a, 0xd7, 0xc8, 0x3e, 0x1f, 0x0a, 0xfd, 0xeb, 0x8a, 0x1d, 0xbf, 0xee, 0x2b, 0x16, - 0x8c, 0xaa, 0xaf, 0x3b, 0x81, 0x7d, 0x61, 0xd1, 0xdc, 0x17, 0xce, 0x75, 0x9c, 0xe0, 0x39, 0x3b, - 0xc2, 0xd7, 0x0a, 0x70, 0x56, 0xd1, 0xd0, 0xdb, 0x0c, 0xff, 0x23, 0x66, 0xd5, 0x3c, 0x94, 0x3c, - 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, 0xea, 0xc5, 0xc7, 0xec, - 0x88, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x45, 0x28, 0xb6, 0xdd, 0xba, 0x38, 0x60, 0xde, 0x27, 0x7b, - 0xfb, 0x66, 0x79, 0xf9, 0xf0, 0x60, 0xf6, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, 0xd9, 0xc2, 0xb9, 0x9b, - 0xe5, 0x65, 0x4c, 0x0b, 0xa3, 0x05, 0x18, 0x97, 0x27, 0xf4, 0x2d, 0x2a, 0x20, 0xfa, 0x9e, 0x38, - 0x87, 0x94, 0xd6, 0x1a, 0x9b, 0x68, 0x9c, 0xa4, 0x47, 0xcb, 0x30, 0xb1, 0xdb, 0xde, 0x24, 0x0d, - 0x12, 0xf1, 0x0f, 0xbe, 0x46, 0xb8, 0x4e, 0xb7, 0x14, 0xdf, 0x25, 0xaf, 0x25, 0xf0, 0x38, 0x55, - 0xc2, 0xfe, 0x53, 0x76, 0x1e, 0x88, 0xde, 0xab, 0x04, 0x3e, 0x9d, 0x58, 0x94, 0xfb, 0x37, 0x73, - 0x3a, 0xf7, 0x32, 0x2b, 0xae, 0x91, 0xfd, 
0x0d, 0x9f, 0xde, 0x25, 0xb2, 0x67, 0x85, 0x31, 0xe7, - 0xfb, 0x3a, 0xce, 0xf9, 0x9f, 0x2d, 0xc0, 0x69, 0xd5, 0x03, 0x86, 0xd8, 0xfa, 0xad, 0xde, 0x07, - 0x97, 0x61, 0xb8, 0x4e, 0xb6, 0x9c, 0x76, 0x23, 0x52, 0x06, 0x86, 0x7e, 0x6e, 0x64, 0x5a, 0x8e, - 0xc1, 0x58, 0xa7, 0x39, 0x42, 0xb7, 0xfd, 0xcf, 0x61, 0x76, 0x10, 0x47, 0x0e, 0x9d, 0xe3, 0x6a, - 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x77, 0x9b, 0x54, 0x30, 0x2b, 0x98, 0xf2, 0x56, 0x99, - 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, 0x47, 0x5e, 0x69, - 0x71, 0x98, 0xca, 0x6e, 0x4b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, 0xb0, 0xcd, 0xb5, - 0x2e, 0xa5, 0xc5, 0x21, 0x5a, 0xd3, 0x42, 0xb0, 0x1d, 0x62, 0x06, 0xa5, 0x97, 0xc6, 0x3b, 0x7e, - 0xb0, 0xeb, 0x7a, 0xdb, 0xcb, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x2b, 0x0c, 0xd6, 0xa8, - 0xd0, 0x2a, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, 0xb3, 0x11, 0xf1, - 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, 0x87, 0x41, 0xe2, - 0xed, 0xad, 0x06, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x0a, 0x27, 0xe1, 0xd3, 0x2c, 0x96, 0x51, - 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x3b, 0xa0, 0x48, 0xbc, 0xbd, 0xe9, 0x41, 0xc6, 0x69, 0x26, 0x87, - 0xd3, 0x2d, 0x27, 0x88, 0xf7, 0xfc, 0x15, 0x6f, 0x0f, 0xd3, 0x32, 0xe8, 0xa3, 0x50, 0x92, 0x1b, - 0x46, 0x28, 0xd4, 0x99, 0x99, 0x13, 0x56, 0x6e, 0x33, 0x98, 0xbc, 0xd9, 0x76, 0x03, 0xd2, 0x24, - 0x5e, 0x14, 0xc6, 0x3b, 0xa4, 0xc4, 0x86, 0x38, 0xe6, 0x86, 0x3e, 0x2a, 0x75, 0xe8, 0x6b, 0x7e, - 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0x6b, 0x5e, 0xa6, 0x75, 0xf3, 0x56, 0x4c, 0x97, 0x54, 0xb2, 0xf3, - 0xc2, 0xd8, 0x60, 0x85, 0x3e, 0x01, 0xa3, 0xfc, 0x3f, 0xb7, 0x11, 0x86, 0xd3, 0xa7, 0x19, 0xef, - 0x0b, 0xf9, 0xbc, 0x39, 0xe1, 0xe2, 0x69, 0xc1, 0x7c, 0x54, 0x87, 0x86, 0xd8, 0xe4, 0x86, 0x30, - 0x8c, 0x36, 0xdc, 0x3d, 0xe2, 0x91, 0x30, 0xac, 0x04, 0xfe, 0x26, 0x11, 0x2a, 0xcf, 0xb3, 0xd9, - 0x36, 0x45, 0x7f, 0x93, 0x2c, 0x4e, 0x52, 0x9e, 0xd7, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, - 0x18, 0xbd, 0x63, 0xba, 0x31, 0xd3, 0xe1, 0x6e, 0x4c, 0xd9, 0xbd, 0x0a, 0x1b, 0x85, 0x70, 0x82, - 0x09, 0xba, 0x01, 0x23, 0x61, 0xe4, 0x04, 0x51, 0xbb, 0xc5, 0x99, 0x9e, 0xe9, 0xc6, 0x94, 0x99, - 0xa4, 0xab, 0x5a, 0x11, 0x6c, 0x30, 0x40, 0xaf, 0x41, 0xa9, 0xe1, 0x6e, 0x91, 0xda, 0x7e, 0xad, - 0x41, 0xa6, 0x47, 0x18, 0xb7, 0xcc, 0x4d, 0xe5, 0xba, 0x24, 0xe2, 0x72, 0xae, 0xfa, 0x8b, 0xe3, - 0xe2, 0xe8, 0x16, 0x9c, 0x89, 0x48, 0xd0, 0x74, 0x3d, 0x87, 0x6e, 0x06, 0xe2, 0x6a, 0xc5, 0x4c, - 0xbd, 0xa3, 0x6c, 0xb5, 0x9d, 0x17, 0xa3, 0x71, 0x66, 0x23, 0x93, 0x0a, 0xe7, 0x94, 0x46, 0x77, - 0x61, 0x3a, 0x03, 0xe3, 0x37, 0xdc, 0xda, 0xfe, 0xf4, 0x29, 0xc6, 0xf9, 0x83, 0x82, 0xf3, 0xf4, - 0x46, 0x0e, 0xdd, 0x61, 0x07, 0x1c, 0xce, 0xe5, 0x8e, 0x6e, 0xc0, 0x38, 0xdb, 0x81, 0x2a, 0xed, - 0x46, 0x43, 0x54, 0x38, 0xc6, 0x2a, 0x7c, 0x42, 0x9e, 0xc7, 0x65, 0x13, 0x7d, 0x78, 0x30, 0x0b, - 0xf1, 0x3f, 0x9c, 0x2c, 0x8d, 0x36, 0x99, 0x55, 0xb1, 0x1d, 0xb8, 0xd1, 0x3e, 0xdd, 0x37, 0xc8, - 0xdd, 0x68, 0x7a, 0xbc, 0xa3, 0x86, 0x45, 0x27, 0x55, 0xa6, 0x47, 0x1d, 0x88, 0x93, 0x0c, 0xe9, - 0x96, 0x1a, 0x46, 0x75, 0xd7, 0x9b, 0x9e, 0xe0, 0xf7, 0x12, 0xb9, 0x23, 0x55, 0x29, 0x10, 0x73, - 0x1c, 0xb3, 0x28, 0xd2, 0x1f, 0x37, 0xe8, 0xc9, 0x35, 0xc9, 0x08, 0x63, 0x8b, 0xa2, 0x44, 0xe0, - 0x98, 0x86, 0x0a, 0x93, 0x51, 0xb4, 0x3f, 0x8d, 0x18, 0xa9, 0xda, 0x58, 0x36, 0x36, 0x3e, 0x8a, - 0x29, 0xdc, 0xde, 0x84, 0x31, 0xb5, 0x11, 0xb2, 0x3e, 0x41, 0xb3, 0xd0, 0xcf, 0xc4, 0x27, 0xa1, - 0x0f, 0x2c, 0xd1, 0x26, 0x30, 0xd1, 0x0a, 0x73, 0x38, 0x6b, 0x82, 
0xfb, 0x16, 0x59, 0xdc, 0x8f, - 0x08, 0xbf, 0xd3, 0x17, 0xb5, 0x26, 0x48, 0x04, 0x8e, 0x69, 0xec, 0xff, 0xc7, 0xc5, 0xd0, 0x78, - 0xb7, 0xed, 0xe1, 0x7c, 0x79, 0x06, 0x86, 0x76, 0xfc, 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xfe, 0x58, - 0xf0, 0xbc, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x60, 0xb4, 0xa6, 0x57, 0x20, 0x0e, 0x47, 0xb5, - 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x04, 0x43, 0xcc, 0x4b, 0xa6, 0xe6, 0x37, 0x84, 0xd4, - 0x26, 0x4f, 0xf8, 0xa1, 0x8a, 0x80, 0x1f, 0x6a, 0xbf, 0xb1, 0xa2, 0x46, 0x17, 0x61, 0x80, 0x36, - 0xa1, 0x5c, 0x11, 0xc7, 0x92, 0x52, 0x6d, 0x5d, 0x65, 0x50, 0x2c, 0xb0, 0xf6, 0x5f, 0x2c, 0x68, - 0xbd, 0x4c, 0xef, 0xc3, 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x2d, 0xe4, 0x8f, - 0xa7, 0x3a, 0x9e, 0x51, 0xac, 0xd0, 0x6d, 0x5e, 0x80, 0x9f, 0xa2, 0xe2, 0x0f, 0x96, 0x6c, 0x28, - 0xc7, 0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, 0xc5, 0x1f, 0x2c, - 0xd9, 0xa0, 0x37, 0x00, 0xe4, 0x0a, 0x23, 0x75, 0xe1, 0x9d, 0xf2, 0x4c, 0x77, 0xa6, 0x1b, 0xaa, - 0xcc, 0xe2, 0x18, 0x3d, 0xa3, 0xe3, 0xff, 0x58, 0xe3, 0x67, 0x47, 0x4c, 0x4e, 0x4b, 0x37, 0x06, - 0x7d, 0x9c, 0x4e, 0x71, 0x27, 0x88, 0x48, 0x7d, 0x21, 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x2e, 0x29, - 0x1b, 0x6e, 0x93, 0xe8, 0xcb, 0x41, 0x30, 0xc1, 0x31, 0x3f, 0xfb, 0xe7, 0x8b, 0x30, 0x9d, 0xd7, - 0x5c, 0x3a, 0xe9, 0xc8, 0x5d, 0x37, 0x5a, 0xa2, 0xe2, 0x95, 0x65, 0x4e, 0xba, 0x15, 0x01, 0xc7, - 0x8a, 0x82, 0x8e, 0x7e, 0xe8, 0x6e, 0xcb, 0x3b, 0x66, 0x7f, 0x3c, 0xfa, 0x55, 0x06, 0xc5, 0x02, - 0x4b, 0xe9, 0x02, 0xe2, 0x84, 0xc2, 0xfd, 0x49, 0x9b, 0x25, 0x98, 0x41, 0xb1, 0xc0, 0xea, 0xda, - 0xae, 0xbe, 0x2e, 0xda, 0x2e, 0xa3, 0x8b, 0xfa, 0x8f, 0xb7, 0x8b, 0xd0, 0x27, 0x01, 0xb6, 0x5c, - 0xcf, 0x0d, 0x77, 0x18, 0xf7, 0x81, 0x23, 0x73, 0x57, 0xc2, 0xd9, 0xaa, 0xe2, 0x82, 0x35, 0x8e, - 0xe8, 0x45, 0x18, 0x56, 0x0b, 0xb0, 0xbc, 0xcc, 0x6c, 0xc1, 0x9a, 0x6f, 0x4d, 0xbc, 0x1b, 0x2d, - 0x63, 0x9d, 0xce, 0xfe, 0x74, 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, - 0x3a, 0xf7, 0xaf, 0xfd, 0x8d, 0x22, 0x8c, 0x1b, 0x95, 0xb5, 0xc3, 0x1e, 0xf6, 0xac, 0x2b, 0x74, - 0x03, 0x77, 0x22, 0x22, 0xd6, 0x9f, 0xdd, 0x7d, 0xa9, 0xe8, 0x9b, 0x3c, 0x5d, 0x01, 0xbc, 0x3c, - 0xfa, 0x24, 0x94, 0x1a, 0x4e, 0xc8, 0x34, 0x67, 0x44, 0xac, 0xbb, 0x5e, 0x98, 0xc5, 0x17, 0x13, - 0x27, 0x8c, 0xb4, 0x53, 0x93, 0xf3, 0x8e, 0x59, 0xd2, 0x93, 0x86, 0xca, 0x27, 0xd2, 0xbf, 0x4e, - 0x35, 0x82, 0x0a, 0x31, 0xfb, 0x98, 0xe3, 0xd0, 0x4b, 0x30, 0x12, 0x10, 0x36, 0x2b, 0x96, 0xa8, - 0x34, 0xc7, 0xa6, 0x59, 0x7f, 0x2c, 0xf6, 0x61, 0x0d, 0x87, 0x0d, 0xca, 0xf8, 0x6e, 0x30, 0xd0, - 0xe1, 0x6e, 0xf0, 0x14, 0x0c, 0xb2, 0x1f, 0x6a, 0x06, 0xa8, 0xd1, 0x28, 0x73, 0x30, 0x96, 0xf8, - 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, 0xd0, 0xdb, 0x87, 0x98, 0xd4, 0xcc, 0x0e, 0x3f, 0xc4, 0x77, - 0x39, 0x31, 0xe5, 0xb1, 0xc4, 0xd9, 0xef, 0x85, 0xb1, 0x65, 0x87, 0x34, 0x7d, 0x6f, 0xc5, 0xab, - 0xb7, 0x7c, 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0xb1, 0x43, 0x84, 0x6f, 0x01, 0x7d, 0xb4, 0x22, 0xcc, - 0x20, 0xf6, 0x36, 0x9c, 0x5e, 0xf6, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0xbe, 0x50, 0x29, 0x6b, 0xf7, - 0xeb, 0x75, 0x79, 0xbf, 0xe3, 0x6e, 0x6d, 0x99, 0x5b, 0xaf, 0x56, 0x92, 0x8b, 0xb5, 0xab, 0x6e, - 0x83, 0xe4, 0x68, 0x41, 0xfe, 0x4a, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x76, 0x38, 0x2b, 0xd7, 0x0e, - 0xf7, 0x3a, 0x0c, 0x6d, 0xb9, 0xa4, 0x51, 0xc7, 0x64, 0x4b, 0xcc, 0xc4, 0x27, 0xf3, 0x3d, 0x75, - 0x56, 0x29, 0xa5, 0xd4, 0x7a, 0xf1, 0xdb, 0xe1, 0xaa, 0x28, 0x8c, 0x15, 0x1b, 0xb4, 0x0b, 0x13, - 0xf2, 0xc2, 0x20, 0xb1, 0x62, 0x5e, 0x3e, 0xd5, 0xe9, 0x16, 0x62, 0x32, 0x3f, 0x75, 0xef, 
	[remainder of the diff: a large hex byte array of compressed file descriptor data in a
	regenerated generated-code file; the old "-" byte lines are removed and replaced by new
	"+" byte lines beginning with the gzip magic bytes 0x1f, 0x8b, 0x08]
0xe7, 0x28, 0x35, 0x5d, 0x6d, 0x37, 0x36, 0x3f, 0x43, 0x6a, 0xd1, 0x1a, 0x89, + 0x9c, 0xf8, 0x73, 0x62, 0x18, 0x56, 0x5c, 0xd1, 0x35, 0x18, 0x88, 0x9c, 0x60, 0x9b, 0x44, 0x62, + 0x03, 0xcc, 0xdc, 0xa8, 0x78, 0x49, 0x4c, 0x57, 0x24, 0xf1, 0x6a, 0x24, 0x3e, 0x16, 0x36, 0x58, + 0x51, 0x2c, 0x58, 0xd8, 0xff, 0x6b, 0x10, 0xce, 0x2e, 0x55, 0xcb, 0x39, 0xf3, 0xea, 0x22, 0x0c, + 0xd4, 0x03, 0x77, 0x8f, 0x04, 0xa2, 0x9f, 0x15, 0x97, 0x65, 0x06, 0xc5, 0x02, 0x8b, 0x5e, 0x82, + 0x11, 0x7e, 0x20, 0x5d, 0x75, 0xbc, 0x7a, 0x43, 0x76, 0xf1, 0x29, 0x41, 0x3d, 0x72, 0x4b, 0xc3, + 0x61, 0x83, 0xf2, 0x88, 0x93, 0xea, 0x62, 0x62, 0x31, 0xe6, 0x1d, 0x76, 0x5f, 0xb0, 0x60, 0x82, + 0x57, 0xb3, 0x10, 0x45, 0x81, 0xbb, 0xd9, 0x8e, 0x48, 0x38, 0xdd, 0xcf, 0x76, 0xba, 0xa5, 0xac, + 0xde, 0xca, 0xed, 0x81, 0xb9, 0x5b, 0x09, 0x2e, 0x7c, 0x13, 0x9c, 0x16, 0xf5, 0x4e, 0x24, 0xd1, + 0x38, 0x55, 0x2d, 0xfa, 0x1e, 0x0b, 0x66, 0x6a, 0xbe, 0x17, 0x05, 0x7e, 0xa3, 0x41, 0x82, 0x4a, + 0x7b, 0xb3, 0xe1, 0x86, 0x3b, 0x7c, 0x9e, 0x62, 0xb2, 0xc5, 0x76, 0x82, 0x9c, 0x31, 0x54, 0x44, + 0x62, 0x0c, 0xcf, 0xdf, 0x3b, 0x98, 0x9d, 0x59, 0xca, 0x65, 0x85, 0x3b, 0x54, 0x83, 0x76, 0x01, + 0xd1, 0xa3, 0xb4, 0x1a, 0x39, 0xdb, 0x24, 0xae, 0x7c, 0xb0, 0xf7, 0xca, 0xcf, 0xdc, 0x3b, 0x98, + 0x45, 0xeb, 0x29, 0x16, 0x38, 0x83, 0x2d, 0x7a, 0x13, 0x4e, 0x51, 0x68, 0xea, 0x5b, 0x87, 0x7a, + 0xaf, 0x6e, 0xfa, 0xde, 0xc1, 0xec, 0xa9, 0xf5, 0x0c, 0x26, 0x38, 0x93, 0x35, 0xfa, 0x6e, 0x0b, + 0xce, 0xc6, 0x9f, 0xbf, 0x72, 0xb7, 0xe5, 0x78, 0xf5, 0xb8, 0xe2, 0x52, 0xef, 0x15, 0xd3, 0x3d, + 0xf9, 0xec, 0x52, 0x1e, 0x27, 0x9c, 0x5f, 0x09, 0xf2, 0x60, 0x8a, 0x36, 0x2d, 0x59, 0x37, 0xf4, + 0x5e, 0xf7, 0x43, 0xf7, 0x0e, 0x66, 0xa7, 0xd6, 0xd3, 0x3c, 0x70, 0x16, 0xe3, 0x99, 0x25, 0x38, + 0x9d, 0x39, 0x3b, 0xd1, 0x04, 0x14, 0x77, 0x09, 0x97, 0xba, 0x4a, 0x98, 0xfe, 0x44, 0xa7, 0xa0, + 0x7f, 0xcf, 0x69, 0xb4, 0xc5, 0xc2, 0xc4, 0xfc, 0xcf, 0xcb, 0x85, 0x97, 0x2c, 0xfb, 0x9f, 0x14, + 0x61, 0x7c, 0xa9, 0x5a, 0xbe, 0xaf, 0x55, 0xaf, 0x1f, 0x7b, 0x85, 0x8e, 0xc7, 0x5e, 0x7c, 0x88, + 0x16, 0x73, 0x0f, 0xd1, 0xff, 0x3b, 0x63, 0xc9, 0xf6, 0xb1, 0x25, 0xfb, 0xc1, 0x9c, 0x25, 0x7b, + 0xcc, 0x0b, 0x75, 0x2f, 0x67, 0xd6, 0xf6, 0xb3, 0x01, 0xcc, 0x94, 0x90, 0xae, 0xfb, 0x35, 0xa7, + 0x91, 0xdc, 0x6a, 0x8f, 0x38, 0x75, 0x8f, 0x67, 0x1c, 0x6b, 0x30, 0xb2, 0xe4, 0xb4, 0x9c, 0x4d, + 0xb7, 0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x93, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x95, 0x16, 0x4f, + 0xdf, 0x3b, 0x98, 0x2d, 0x2e, 0xd4, 0xa9, 0x98, 0x01, 0x8a, 0x6a, 0x1f, 0x53, 0x0a, 0xf4, 0x5e, + 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x5b, 0x0e, 0xfc, 0x56, 0x82, + 0x94, 0xd1, 0xd8, 0xbf, 0x5e, 0x80, 0x47, 0x96, 0x48, 0x6b, 0x67, 0xb5, 0x9a, 0x73, 0x5e, 0x5c, + 0x82, 0xa1, 0xa6, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x6a, 0x36, 0x23, 0xd6, 0x04, 0x0c, 0x2b, + 0x2c, 0xba, 0x00, 0x7d, 0xad, 0x58, 0x88, 0x1d, 0x91, 0x02, 0x30, 0x13, 0x5f, 0x19, 0x86, 0x52, + 0xb4, 0x43, 0x12, 0x88, 0x19, 0xa3, 0x28, 0x6e, 0x86, 0x24, 0xc0, 0x0c, 0x13, 0x4b, 0x02, 0x54, + 0x46, 0x10, 0x27, 0x42, 0x42, 0x12, 0xa0, 0x18, 0xac, 0x51, 0xa1, 0x0a, 0x94, 0xc2, 0xc4, 0xc8, + 0xf6, 0xb4, 0x34, 0x47, 0x99, 0xa8, 0xa0, 0x46, 0x32, 0x66, 0x62, 0x9c, 0x60, 0x03, 0x5d, 0x45, + 0x85, 0xaf, 0x17, 0x00, 0xf1, 0x2e, 0xfc, 0x36, 0xeb, 0xb8, 0x9b, 0xe9, 0x8e, 0xeb, 0x7d, 0x49, + 0x1c, 0x57, 0xef, 0xfd, 0x57, 0x0b, 0x1e, 0x59, 0x72, 0xbd, 0x3a, 0x09, 0x72, 0x26, 0xe0, 0x83, + 0xb9, 0x3b, 0x1f, 0x4d, 0x48, 0x31, 0xa6, 0x58, 0xdf, 0x31, 0x4c, 0x31, 0xfb, 0x4f, 0x2c, 0x40, + 0xfc, 0xb3, 0xdf, 0x71, 0x1f, 0x7b, 0x33, 
0xfd, 0xb1, 0xc7, 0x30, 0x2d, 0xec, 0xbf, 0x65, 0xc1, + 0xf0, 0x52, 0xc3, 0x71, 0x9b, 0xe2, 0x53, 0x97, 0x60, 0x52, 0x2a, 0x8a, 0x18, 0x58, 0x93, 0xfd, + 0xe9, 0xe6, 0x36, 0x89, 0x93, 0x48, 0x9c, 0xa6, 0x47, 0x9f, 0x80, 0xb3, 0x06, 0x70, 0x83, 0x34, + 0x5b, 0x0d, 0x27, 0xd2, 0x6f, 0x05, 0xec, 0xf4, 0xc7, 0x79, 0x44, 0x38, 0xbf, 0xbc, 0x7d, 0x1d, + 0xc6, 0x96, 0x1a, 0x2e, 0xf1, 0xa2, 0x72, 0x65, 0xc9, 0xf7, 0xb6, 0xdc, 0x6d, 0xf4, 0x32, 0x8c, + 0x45, 0x6e, 0x93, 0xf8, 0xed, 0xa8, 0x4a, 0x6a, 0xbe, 0xc7, 0xee, 0xda, 0xd6, 0xa5, 0xfe, 0x45, + 0x74, 0xef, 0x60, 0x76, 0x6c, 0xc3, 0xc0, 0xe0, 0x04, 0xa5, 0xfd, 0x7b, 0x74, 0xc4, 0xfd, 0x66, + 0xcb, 0xf7, 0x88, 0x17, 0x2d, 0xf9, 0x5e, 0x9d, 0xeb, 0x64, 0x5e, 0x86, 0xbe, 0x88, 0x8e, 0x20, + 0xff, 0xf2, 0x8b, 0x72, 0x69, 0xd3, 0x71, 0x3b, 0x3c, 0x98, 0x3d, 0x93, 0x2e, 0xc1, 0x46, 0x96, + 0x95, 0x41, 0x1f, 0x84, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3e, 0xf5, 0x51, 0x39, 0xfe, 0x55, + 0x06, 0x3d, 0x3c, 0x98, 0x1d, 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x29, 0x18, 0x6c, 0x92, + 0x30, 0x74, 0xb6, 0xe5, 0xf9, 0x3d, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x18, + 0xf4, 0x93, 0x20, 0xf0, 0x03, 0xb1, 0xab, 0x8c, 0x0a, 0xc2, 0xfe, 0x15, 0x0a, 0xc4, 0x1c, 0x67, + 0xff, 0x2b, 0x0b, 0xc6, 0x55, 0x5b, 0x79, 0x5d, 0x27, 0x70, 0x6f, 0xfa, 0x38, 0x40, 0x4d, 0x7e, + 0x60, 0xc8, 0xce, 0xbb, 0xe1, 0xe7, 0x2e, 0x66, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, 0x39, 0x2b, 0x50, + 0x88, 0x35, 0x6e, 0xf6, 0xdf, 0xb7, 0x60, 0x2a, 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, + 0xbe, 0x6a, 0xae, 0xb7, 0xaf, 0xa2, 0xa5, 0xd9, 0x37, 0xa9, 0xc5, 0x27, 0x21, 0xda, 0x17, 0x5d, + 0x85, 0x7e, 0x37, 0x22, 0x4d, 0xf9, 0x31, 0x8f, 0x75, 0xfc, 0x18, 0xde, 0xaa, 0x78, 0x44, 0xca, + 0xb4, 0x24, 0xe6, 0x0c, 0xec, 0x5f, 0x2f, 0x42, 0x89, 0x4f, 0xdb, 0x35, 0xa7, 0x75, 0x02, 0x63, + 0xf1, 0x34, 0x94, 0xdc, 0x66, 0xb3, 0x1d, 0x39, 0x9b, 0xe2, 0x00, 0x1a, 0xe2, 0x9b, 0x41, 0x59, + 0x02, 0x71, 0x8c, 0x47, 0x65, 0xe8, 0x63, 0x4d, 0xe1, 0x5f, 0xf9, 0x64, 0xf6, 0x57, 0x8a, 0xb6, + 0xcf, 0x2d, 0x3b, 0x91, 0xc3, 0x65, 0x3f, 0x75, 0xf2, 0x51, 0x10, 0x66, 0x2c, 0x90, 0x03, 0xb0, + 0xe9, 0x7a, 0x4e, 0xb0, 0x4f, 0x61, 0xd3, 0x45, 0xc6, 0xf0, 0xd9, 0xce, 0x0c, 0x17, 0x15, 0x3d, + 0x67, 0xab, 0x3e, 0x2c, 0x46, 0x60, 0x8d, 0xe9, 0xcc, 0x07, 0xa0, 0xa4, 0x88, 0x8f, 0x22, 0xc2, + 0xcd, 0x7c, 0x18, 0xc6, 0x13, 0x75, 0x75, 0x2b, 0x3e, 0xa2, 0x4b, 0x80, 0xbf, 0xcc, 0xb6, 0x0c, + 0xd1, 0xea, 0x15, 0x6f, 0x4f, 0xec, 0x9c, 0x6f, 0xc1, 0xa9, 0x46, 0xc6, 0xde, 0x2b, 0xc6, 0xb5, + 0xf7, 0xbd, 0xfa, 0x11, 0xf1, 0xd9, 0xa7, 0xb2, 0xb0, 0x38, 0xb3, 0x0e, 0x2a, 0xd5, 0xf8, 0x2d, + 0xba, 0x40, 0x9c, 0x86, 0x7e, 0x41, 0xb8, 0x21, 0x60, 0x58, 0x61, 0xe9, 0x7e, 0x77, 0x4a, 0x35, + 0xfe, 0x1a, 0xd9, 0xaf, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x2d, 0x6d, 0xfe, 0x39, 0xde, 0xfb, + 0x7c, 0xbb, 0x1c, 0x16, 0x0c, 0x8a, 0xd7, 0xc8, 0x3e, 0x1f, 0x0a, 0xfd, 0xeb, 0x8a, 0x1d, 0xbf, + 0xee, 0xab, 0x16, 0x8c, 0xaa, 0xaf, 0x3b, 0x81, 0x7d, 0x61, 0xd1, 0xdc, 0x17, 0xce, 0x75, 0x9c, + 0xe0, 0x39, 0x3b, 0xc2, 0xd7, 0x0b, 0x70, 0x56, 0xd1, 0xd0, 0xdb, 0x0c, 0xff, 0x23, 0x66, 0xd5, + 0x3c, 0x94, 0x3c, 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, 0xea, + 0xc5, 0xc7, 0xec, 0x88, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x45, 0x28, 0xb6, 0xdd, 0xba, 0x38, 0x60, + 0xde, 0x27, 0x7b, 0xfb, 0x66, 0x79, 0xf9, 0xf0, 0x60, 0xf6, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, 0xd9, + 0xc2, 0xb9, 0x9b, 0xe5, 0x65, 0x4c, 0x0b, 0xa3, 0x05, 0x18, 0x97, 0x27, 0xf4, 0x2d, 0x2a, 0x20, + 0xfa, 0x9e, 0x38, 0x87, 0x94, 0xd6, 0x1a, 0x9b, 0x68, 0x9c, 0xa4, 
0x47, 0xcb, 0x30, 0xb1, 0xdb, + 0xde, 0x24, 0x0d, 0x12, 0xf1, 0x0f, 0xbe, 0x46, 0xb8, 0x4e, 0xb7, 0x14, 0xdf, 0x25, 0xaf, 0x25, + 0xf0, 0x38, 0x55, 0xc2, 0xfe, 0x73, 0x76, 0x1e, 0x88, 0xde, 0xab, 0x04, 0x3e, 0x9d, 0x58, 0x94, + 0xfb, 0xb7, 0x72, 0x3a, 0xf7, 0x32, 0x2b, 0xae, 0x91, 0xfd, 0x0d, 0x9f, 0xde, 0x25, 0xb2, 0x67, + 0x85, 0x31, 0xe7, 0xfb, 0x3a, 0xce, 0xf9, 0x9f, 0x2f, 0xc0, 0x69, 0xd5, 0x03, 0x86, 0xd8, 0xfa, + 0xed, 0xde, 0x07, 0x97, 0x61, 0xb8, 0x4e, 0xb6, 0x9c, 0x76, 0x23, 0x52, 0x06, 0x86, 0x7e, 0x6e, + 0x64, 0x5a, 0x8e, 0xc1, 0x58, 0xa7, 0x39, 0x42, 0xb7, 0xfd, 0xb7, 0x61, 0x76, 0x10, 0x47, 0x0e, + 0x9d, 0xe3, 0x6a, 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x77, 0x9b, 0x54, 0x30, 0x2b, 0x98, + 0xf2, 0x56, 0x99, 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, + 0x47, 0x5e, 0x69, 0x71, 0x98, 0xca, 0x6e, 0x4b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, + 0xb0, 0xcd, 0xb5, 0x2e, 0xa5, 0xc5, 0x21, 0x5a, 0xd3, 0x42, 0xb0, 0x1d, 0x62, 0x06, 0xa5, 0x97, + 0xc6, 0x3b, 0x7e, 0xb0, 0xeb, 0x7a, 0xdb, 0xcb, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x2b, + 0x0c, 0xd6, 0xa8, 0xd0, 0x2a, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, + 0xb3, 0x11, 0xf1, 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, + 0x87, 0x41, 0xe2, 0xed, 0xad, 0x06, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x0a, 0x27, 0xe1, 0xd3, + 0x2c, 0x96, 0x51, 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x83, 0x50, 0x24, 0xde, 0xde, 0xf4, 0x20, 0xe3, + 0x34, 0x93, 0xc3, 0xe9, 0x96, 0x13, 0xc4, 0x7b, 0xfe, 0x8a, 0xb7, 0x87, 0x69, 0x19, 0xf4, 0x31, + 0x28, 0xc9, 0x0d, 0x23, 0x14, 0xea, 0xcc, 0xcc, 0x09, 0x2b, 0xb7, 0x19, 0x4c, 0xde, 0x6c, 0xbb, + 0x01, 0x69, 0x12, 0x2f, 0x0a, 0xe3, 0x1d, 0x52, 0x62, 0x43, 0x1c, 0x73, 0x43, 0x1f, 0x93, 0x3a, + 0xf4, 0x35, 0xbf, 0xed, 0x45, 0xe1, 0x74, 0x89, 0x35, 0x2f, 0xd3, 0xba, 0x79, 0x2b, 0xa6, 0x4b, + 0x2a, 0xd9, 0x79, 0x61, 0x6c, 0xb0, 0x42, 0x9f, 0x84, 0x51, 0xfe, 0x9f, 0xdb, 0x08, 0xc3, 0xe9, + 0xd3, 0x8c, 0xf7, 0x85, 0x7c, 0xde, 0x9c, 0x70, 0xf1, 0xb4, 0x60, 0x3e, 0xaa, 0x43, 0x43, 0x6c, + 0x72, 0x43, 0x18, 0x46, 0x1b, 0xee, 0x1e, 0xf1, 0x48, 0x18, 0x56, 0x02, 0x7f, 0x93, 0x08, 0x95, + 0xe7, 0xd9, 0x6c, 0x9b, 0xa2, 0xbf, 0x49, 0x16, 0x27, 0x29, 0xcf, 0xeb, 0x7a, 0x19, 0x6c, 0xb2, + 0x40, 0x37, 0x61, 0x8c, 0xde, 0x31, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0x5e, 0x85, 0x8d, + 0x42, 0x38, 0xc1, 0x04, 0xdd, 0x80, 0x91, 0x30, 0x72, 0x82, 0xa8, 0xdd, 0xe2, 0x4c, 0xcf, 0x74, + 0x63, 0xca, 0x4c, 0xd2, 0x55, 0xad, 0x08, 0x36, 0x18, 0xa0, 0xd7, 0xa0, 0xd4, 0x70, 0xb7, 0x48, + 0x6d, 0xbf, 0xd6, 0x20, 0xd3, 0x23, 0x8c, 0x5b, 0xe6, 0xa6, 0x72, 0x5d, 0x12, 0x71, 0x39, 0x57, + 0xfd, 0xc5, 0x71, 0x71, 0x74, 0x0b, 0xce, 0x44, 0x24, 0x68, 0xba, 0x9e, 0x43, 0x37, 0x03, 0x71, + 0xb5, 0x62, 0xa6, 0xde, 0x51, 0xb6, 0xda, 0xce, 0x8b, 0xd1, 0x38, 0xb3, 0x91, 0x49, 0x85, 0x73, + 0x4a, 0xa3, 0xbb, 0x30, 0x9d, 0x81, 0xf1, 0x1b, 0x6e, 0x6d, 0x7f, 0xfa, 0x14, 0xe3, 0xfc, 0x21, + 0xc1, 0x79, 0x7a, 0x23, 0x87, 0xee, 0xb0, 0x03, 0x0e, 0xe7, 0x72, 0x47, 0x37, 0x60, 0x9c, 0xed, + 0x40, 0x95, 0x76, 0xa3, 0x21, 0x2a, 0x1c, 0x63, 0x15, 0x3e, 0x21, 0xcf, 0xe3, 0xb2, 0x89, 0x3e, + 0x3c, 0x98, 0x85, 0xf8, 0x1f, 0x4e, 0x96, 0x46, 0x9b, 0xcc, 0xaa, 0xd8, 0x0e, 0xdc, 0x68, 0x9f, + 0xee, 0x1b, 0xe4, 0x6e, 0x34, 0x3d, 0xde, 0x51, 0xc3, 0xa2, 0x93, 0x2a, 0xd3, 0xa3, 0x0e, 0xc4, + 0x49, 0x86, 0x74, 0x4b, 0x0d, 0xa3, 0xba, 0xeb, 0x4d, 0x4f, 0xf0, 0x7b, 0x89, 0xdc, 0x91, 0xaa, + 0x14, 0x88, 0x39, 0x8e, 0x59, 0x14, 0xe9, 0x8f, 0x1b, 0xf4, 0xe4, 0x9a, 0x64, 0x84, 0xb1, 
0x45, + 0x51, 0x22, 0x70, 0x4c, 0x43, 0x85, 0xc9, 0x28, 0xda, 0x9f, 0x46, 0x8c, 0x54, 0x6d, 0x2c, 0x1b, + 0x1b, 0x1f, 0xc3, 0x14, 0x6e, 0x6f, 0xc2, 0x98, 0xda, 0x08, 0x59, 0x9f, 0xa0, 0x59, 0xe8, 0x67, + 0xe2, 0x93, 0xd0, 0x07, 0x96, 0x68, 0x13, 0x98, 0x68, 0x85, 0x39, 0x9c, 0x35, 0xc1, 0x7d, 0x8b, + 0x2c, 0xee, 0x47, 0x84, 0xdf, 0xe9, 0x8b, 0x5a, 0x13, 0x24, 0x02, 0xc7, 0x34, 0xf6, 0xff, 0xe6, + 0x62, 0x68, 0xbc, 0xdb, 0xf6, 0x70, 0xbe, 0x3c, 0x03, 0x43, 0x3b, 0x7e, 0x18, 0x51, 0x6a, 0x56, + 0x47, 0x7f, 0x2c, 0x78, 0x5e, 0x15, 0x70, 0xac, 0x28, 0xd0, 0x2b, 0x30, 0x5a, 0xd3, 0x2b, 0x10, + 0x87, 0xa3, 0xda, 0x46, 0x8c, 0xda, 0xb1, 0x49, 0x8b, 0x5e, 0x82, 0x21, 0xe6, 0x25, 0x53, 0xf3, + 0x1b, 0x42, 0x6a, 0x93, 0x27, 0xfc, 0x50, 0x45, 0xc0, 0x0f, 0xb5, 0xdf, 0x58, 0x51, 0xa3, 0x8b, + 0x30, 0x40, 0x9b, 0x50, 0xae, 0x88, 0x63, 0x49, 0xa9, 0xb6, 0xae, 0x32, 0x28, 0x16, 0x58, 0xfb, + 0xff, 0x2f, 0x68, 0xbd, 0x4c, 0xef, 0xc3, 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, + 0x2d, 0xe4, 0x8f, 0xa7, 0x3a, 0x9e, 0x51, 0xac, 0xd0, 0x6d, 0x5e, 0x80, 0x9f, 0xa2, 0xe2, 0x0f, + 0x96, 0x6c, 0x28, 0xc7, 0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, + 0xc5, 0x1f, 0x2c, 0xd9, 0xa0, 0x37, 0x00, 0xe4, 0x0a, 0x23, 0x75, 0xe1, 0x9d, 0xf2, 0x4c, 0x77, + 0xa6, 0x1b, 0xaa, 0xcc, 0xe2, 0x18, 0x3d, 0xa3, 0xe3, 0xff, 0x58, 0xe3, 0x67, 0x47, 0x4c, 0x4e, + 0x4b, 0x37, 0x06, 0x7d, 0x82, 0x4e, 0x71, 0x27, 0x88, 0x48, 0x7d, 0x21, 0x12, 0x9d, 0xf3, 0xde, + 0xde, 0x2e, 0x29, 0x1b, 0x6e, 0x93, 0xe8, 0xcb, 0x41, 0x30, 0xc1, 0x31, 0x3f, 0xfb, 0x17, 0x8b, + 0x30, 0x9d, 0xd7, 0x5c, 0x3a, 0xe9, 0xc8, 0x5d, 0x37, 0x5a, 0xa2, 0xe2, 0x95, 0x65, 0x4e, 0xba, + 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x8e, 0x7e, 0xe8, 0x6e, 0xcb, 0x3b, 0x66, 0x7f, 0x3c, 0xfa, 0x55, + 0x06, 0xc5, 0x02, 0x4b, 0xe9, 0x02, 0xe2, 0x84, 0xc2, 0xfd, 0x49, 0x9b, 0x25, 0x98, 0x41, 0xb1, + 0xc0, 0xea, 0xda, 0xae, 0xbe, 0x2e, 0xda, 0x2e, 0xa3, 0x8b, 0xfa, 0x8f, 0xb7, 0x8b, 0xd0, 0xa7, + 0x00, 0xb6, 0x5c, 0xcf, 0x0d, 0x77, 0x18, 0xf7, 0x81, 0x23, 0x73, 0x57, 0xc2, 0xd9, 0xaa, 0xe2, + 0x82, 0x35, 0x8e, 0xe8, 0x45, 0x18, 0x56, 0x0b, 0xb0, 0xbc, 0xcc, 0x6c, 0xc1, 0x9a, 0x6f, 0x4d, + 0xbc, 0x1b, 0x2d, 0x63, 0x9d, 0xce, 0xfe, 0x4c, 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, + 0x6b, 0xff, 0x16, 0x3a, 0xf7, 0xaf, 0xfd, 0xcd, 0x22, 0x8c, 0x1b, 0x95, 0xb5, 0xc3, 0x1e, 0xf6, + 0xac, 0x2b, 0x74, 0x03, 0x77, 0x22, 0x22, 0xd6, 0x9f, 0xdd, 0x7d, 0xa9, 0xe8, 0x9b, 0x3c, 0x5d, + 0x01, 0xbc, 0x3c, 0xfa, 0x14, 0x94, 0x1a, 0x4e, 0xc8, 0x34, 0x67, 0x44, 0xac, 0xbb, 0x5e, 0x98, + 0xc5, 0x17, 0x13, 0x27, 0x8c, 0xb4, 0x53, 0x93, 0xf3, 0x8e, 0x59, 0xd2, 0x93, 0x86, 0xca, 0x27, + 0xd2, 0xbf, 0x4e, 0x35, 0x82, 0x0a, 0x31, 0xfb, 0x98, 0xe3, 0xd0, 0x4b, 0x30, 0x12, 0x10, 0x36, + 0x2b, 0x96, 0xa8, 0x34, 0xc7, 0xa6, 0x59, 0x7f, 0x2c, 0xf6, 0x61, 0x0d, 0x87, 0x0d, 0xca, 0xf8, + 0x6e, 0x30, 0xd0, 0xe1, 0x6e, 0xf0, 0x14, 0x0c, 0xb2, 0x1f, 0x6a, 0x06, 0xa8, 0xd1, 0x28, 0x73, + 0x30, 0x96, 0xf8, 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, 0xd0, 0xdb, 0x87, 0x98, 0xd4, 0xcc, 0x0e, + 0x3f, 0xc4, 0x77, 0x39, 0x31, 0xe5, 0xb1, 0xc4, 0xd9, 0xef, 0x85, 0xb1, 0x65, 0x87, 0x34, 0x7d, + 0x6f, 0xc5, 0xab, 0xb7, 0x7c, 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0xb1, 0x43, 0x84, 0x6f, 0x01, 0x7d, + 0xb4, 0x22, 0xcc, 0x20, 0xf6, 0x36, 0x9c, 0x5e, 0xf6, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0xbe, 0x50, + 0x29, 0x6b, 0xf7, 0xeb, 0x75, 0x79, 0xbf, 0xe3, 0x6e, 0x6d, 0x99, 0x5b, 0xaf, 0x56, 0x92, 0x8b, + 0xb5, 0xab, 0x6e, 0x83, 0xe4, 0x68, 0x41, 0xfe, 0x52, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x76, 0x38, + 0x2b, 0xd7, 0x0e, 
0xf7, 0x3a, 0x0c, 0x6d, 0xb9, 0xa4, 0x51, 0xc7, 0x64, 0x4b, 0xcc, 0xc4, 0x27, + 0xf3, 0x3d, 0x75, 0x56, 0x29, 0xa5, 0xd4, 0x7a, 0xf1, 0xdb, 0xe1, 0xaa, 0x28, 0x8c, 0x15, 0x1b, + 0xb4, 0x0b, 0x13, 0xf2, 0xc2, 0x20, 0xb1, 0x62, 0x5e, 0x3e, 0xd5, 0xe9, 0x16, 0x62, 0x32, 0x3f, + 0x75, 0xef, 0x60, 0x76, 0x02, 0x27, 0xd8, 0xe0, 0x14, 0x63, 0x7a, 0x1d, 0x6c, 0xd2, 0x1d, 0xb8, + 0x8f, 0x75, 0x3f, 0xbb, 0x0e, 0xb2, 0x9b, 0x2d, 0x83, 0xda, 0x3f, 0x6e, 0xc1, 0x43, 0xa9, 0x9e, + 0x11, 0x37, 0xfc, 0x63, 0x1e, 0x85, 0xe4, 0x8d, 0xbb, 0xd0, 0xfd, 0xc6, 0x6d, 0xff, 0x75, 0x0b, + 0x4e, 0xad, 0x34, 0x5b, 0xd1, 0xfe, 0xb2, 0x6b, 0x1a, 0xcd, 0x3e, 0x00, 0x03, 0x4d, 0x52, 0x77, + 0xdb, 0x4d, 0x31, 0x72, 0xb3, 0x72, 0x97, 0x5a, 0x63, 0xd0, 0xc3, 0x83, 0xd9, 0xd1, 0x6a, 0xe4, + 0x07, 0xce, 0x36, 0xe1, 0x00, 0x2c, 0xc8, 0xd9, 0x5e, 0xef, 0xbe, 0x45, 0xae, 0xbb, 0x4d, 0x57, + 0x7a, 0x5e, 0x75, 0xd4, 0xd9, 0xcd, 0xc9, 0x0e, 0x9d, 0x7b, 0xbd, 0xed, 0x78, 0x91, 0x1b, 0xed, + 0x0b, 0x7b, 0x97, 0x64, 0x82, 0x63, 0x7e, 0xf6, 0x37, 0x2c, 0x18, 0x97, 0xf3, 0x7e, 0xa1, 0x5e, + 0x0f, 0x48, 0x18, 0xa2, 0x19, 0x28, 0xb8, 0x2d, 0xd1, 0x4a, 0x10, 0xad, 0x2c, 0x94, 0x2b, 0xb8, + 0xe0, 0xb6, 0xa4, 0x58, 0xc6, 0x36, 0xc2, 0xa2, 0x69, 0xfa, 0xbb, 0x2a, 0xe0, 0x58, 0x51, 0xa0, + 0x4b, 0x30, 0xe4, 0xf9, 0x75, 0x6e, 0xe7, 0xe2, 0x47, 0x1a, 0x9b, 0x60, 0xeb, 0x02, 0x86, 0x15, + 0x16, 0x55, 0xa0, 0xc4, 0x1d, 0xc3, 0xe2, 0x49, 0xdb, 0x93, 0x7b, 0x19, 0xfb, 0xb2, 0x0d, 0x59, + 0x12, 0xc7, 0x4c, 0xec, 0x5f, 0xb3, 0x60, 0x44, 0x7e, 0x59, 0x8f, 0x32, 0x27, 0x5d, 0x5a, 0xb1, + 0xbc, 0x19, 0x2f, 0x2d, 0x2a, 0x33, 0x32, 0x8c, 0x21, 0x2a, 0x16, 0x8f, 0x24, 0x2a, 0x5e, 0x86, + 0x61, 0xa7, 0xd5, 0xaa, 0x98, 0x72, 0x26, 0x9b, 0x4a, 0x0b, 0x31, 0x18, 0xeb, 0x34, 0xf6, 0x8f, + 0x15, 0x60, 0x4c, 0x7e, 0x41, 0xb5, 0xbd, 0x19, 0x92, 0x08, 0x6d, 0x40, 0xc9, 0xe1, 0xa3, 0x44, + 0xe4, 0x24, 0x7f, 0x2c, 0x5b, 0x8f, 0x60, 0x0c, 0x69, 0x7c, 0xe0, 0x2f, 0xc8, 0xd2, 0x38, 0x66, + 0x84, 0x1a, 0x30, 0xe9, 0xf9, 0x11, 0xdb, 0xfc, 0x15, 0xbe, 0x93, 0x69, 0x27, 0xc9, 0xfd, 0xac, + 0xe0, 0x3e, 0xb9, 0x9e, 0xe4, 0x82, 0xd3, 0x8c, 0xd1, 0x8a, 0xd4, 0xcd, 0x14, 0xf3, 0x95, 0x01, + 0xfa, 0xc0, 0x65, 0xab, 0x66, 0xec, 0x5f, 0xb1, 0xa0, 0x24, 0xc9, 0x4e, 0xc2, 0x8a, 0xb7, 0x06, + 0x83, 0x21, 0x1b, 0x04, 0xd9, 0x35, 0x76, 0xa7, 0x86, 0xf3, 0xf1, 0x8a, 0xcf, 0x34, 0xfe, 0x3f, + 0xc4, 0x92, 0x07, 0x53, 0xcd, 0xab, 0xe6, 0xbf, 0x43, 0x54, 0xf3, 0xaa, 0x3d, 0x39, 0x87, 0xd2, + 0x1f, 0xb1, 0x36, 0x6b, 0xba, 0x2e, 0x2a, 0x7a, 0xb5, 0x02, 0xb2, 0xe5, 0xde, 0x4d, 0x8a, 0x5e, + 0x15, 0x06, 0xc5, 0x02, 0x8b, 0xde, 0x80, 0x91, 0x9a, 0xd4, 0xc9, 0xc6, 0x2b, 0xfc, 0x62, 0x47, + 0xfb, 0x80, 0x32, 0x25, 0x71, 0x5d, 0xc8, 0x92, 0x56, 0x1e, 0x1b, 0xdc, 0x4c, 0xc7, 0x87, 0x62, + 0x37, 0xc7, 0x87, 0x98, 0x6f, 0xbe, 0x1b, 0xc0, 0x4f, 0x58, 0x30, 0xc0, 0x75, 0x71, 0xbd, 0xa9, + 0x42, 0x35, 0xcb, 0x5a, 0xdc, 0x77, 0xb7, 0x28, 0x50, 0x58, 0xca, 0xd0, 0x1a, 0x94, 0xd8, 0x0f, + 0xa6, 0x4b, 0x2c, 0xe6, 0xbf, 0x4b, 0xe0, 0xb5, 0xea, 0x0d, 0xbc, 0x25, 0x8b, 0xe1, 0x98, 0x83, + 0xfd, 0xa3, 0x45, 0xba, 0xbb, 0xc5, 0xa4, 0xc6, 0xa1, 0x6f, 0x3d, 0xb8, 0x43, 0xbf, 0xf0, 0xa0, + 0x0e, 0xfd, 0x6d, 0x18, 0xaf, 0x69, 0x76, 0xb8, 0x78, 0x24, 0x2f, 0x75, 0x9c, 0x24, 0x9a, 0xc9, + 0x8e, 0x6b, 0x59, 0x96, 0x4c, 0x26, 0x38, 0xc9, 0x15, 0x7d, 0x02, 0x46, 0xf8, 0x38, 0x8b, 0x5a, + 0xb8, 0xef, 0xc8, 0x13, 0xf9, 0xf3, 0x45, 0xaf, 0x82, 0x6b, 0xe5, 0xb4, 0xe2, 0xd8, 0x60, 0x66, + 0xff, 0xa9, 0x05, 0x68, 0xa5, 0xb5, 0x43, 0x9a, 0x24, 0x70, 0x1a, 0xb1, 0x3a, 0xfd, 0x87, 0x2c, + 0x98, 0x26, 0x29, 0xf0, 0x92, 0xdf, 0x6c, 
0x8a, 0x4b, 0x4b, 0xce, 0xbd, 0x7a, 0x25, 0xa7, 0x8c, + 0x7a, 0xb8, 0x31, 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, + 0xfc, 0x50, 0x1e, 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0xbd, 0x23, 0x90, + 0xdb, 0x8a, 0x77, 0xed, 0x08, 0xef, 0xda, 0x11, 0xde, 0xb5, 0x23, 0xbc, 0x6b, 0x47, 0x78, 0xd7, + 0x8e, 0xf0, 0x1d, 0x6f, 0x47, 0xf8, 0x0b, 0x16, 0x9c, 0x56, 0xc7, 0x80, 0x71, 0xf1, 0xfd, 0x2c, + 0x4c, 0xf1, 0xe5, 0x66, 0xf8, 0x2e, 0x8a, 0x63, 0xef, 0x72, 0xe6, 0xcc, 0x4d, 0xf8, 0xd8, 0x1a, + 0x05, 0xf9, 0x63, 0x85, 0x0c, 0x04, 0xce, 0xaa, 0xc6, 0xfe, 0xc5, 0x21, 0xe8, 0x5f, 0xd9, 0x23, + 0x5e, 0x74, 0x02, 0x57, 0x84, 0x1a, 0x8c, 0xb9, 0xde, 0x9e, 0xdf, 0xd8, 0x23, 0x75, 0x8e, 0x3f, + 0xca, 0x4d, 0xf6, 0x8c, 0x60, 0x3d, 0x56, 0x36, 0x58, 0xe0, 0x04, 0xcb, 0x07, 0xa1, 0x4d, 0xbe, + 0x02, 0x03, 0x7c, 0x13, 0x17, 0xaa, 0xe4, 0xcc, 0x3d, 0x9b, 0x75, 0xa2, 0x38, 0x9a, 0x62, 0x4d, + 0x37, 0x3f, 0x24, 0x44, 0x71, 0xf4, 0x19, 0x18, 0xdb, 0x72, 0x83, 0x30, 0xda, 0x70, 0x9b, 0x24, + 0x8c, 0x9c, 0x66, 0xeb, 0x3e, 0xb4, 0xc7, 0xaa, 0x1f, 0x56, 0x0d, 0x4e, 0x38, 0xc1, 0x19, 0x6d, + 0xc3, 0x68, 0xc3, 0xd1, 0xab, 0x1a, 0x3c, 0x72, 0x55, 0xea, 0x74, 0xb8, 0xae, 0x33, 0xc2, 0x26, + 0x5f, 0xba, 0x9c, 0x6a, 0x4c, 0x01, 0x3a, 0xc4, 0xd4, 0x02, 0x6a, 0x39, 0x71, 0xcd, 0x27, 0xc7, + 0x51, 0x41, 0x87, 0x39, 0xc8, 0x96, 0x4c, 0x41, 0x47, 0x73, 0x83, 0xfd, 0x34, 0x94, 0x08, 0xed, + 0x42, 0xca, 0x58, 0x1c, 0x30, 0xf3, 0xbd, 0xb5, 0x75, 0xcd, 0xad, 0x05, 0xbe, 0xa9, 0xb7, 0x5f, + 0x91, 0x9c, 0x70, 0xcc, 0x14, 0x2d, 0xc1, 0x40, 0x48, 0x02, 0x97, 0x84, 0xe2, 0xa8, 0xe9, 0x30, + 0x8c, 0x8c, 0x8c, 0xbf, 0x86, 0xe1, 0xbf, 0xb1, 0x28, 0x4a, 0xa7, 0x97, 0xc3, 0x54, 0x9a, 0xec, + 0x30, 0xd0, 0xa6, 0xd7, 0x02, 0x83, 0x62, 0x81, 0x45, 0xaf, 0xc1, 0x60, 0x40, 0x1a, 0xcc, 0x30, + 0x34, 0xda, 0xfb, 0x24, 0xe7, 0x76, 0x26, 0x5e, 0x0e, 0x4b, 0x06, 0xe8, 0x1a, 0xa0, 0x80, 0x50, + 0x41, 0xc9, 0xf5, 0xb6, 0x95, 0xdb, 0xa8, 0xd8, 0x68, 0x95, 0x40, 0x8a, 0x63, 0x0a, 0xf9, 0x10, + 0x0a, 0x67, 0x14, 0x43, 0x57, 0x60, 0x52, 0x41, 0xcb, 0x5e, 0x18, 0x39, 0x74, 0x83, 0x1b, 0x67, + 0xbc, 0x94, 0x9e, 0x02, 0x27, 0x09, 0x70, 0xba, 0x8c, 0xfd, 0x65, 0x0b, 0x78, 0x3f, 0x9f, 0xc0, + 0xed, 0xfc, 0x55, 0xf3, 0x76, 0x7e, 0x36, 0x77, 0xe4, 0x72, 0x6e, 0xe6, 0x5f, 0xb6, 0x60, 0x58, + 0x1b, 0xd9, 0x78, 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0xb1, 0x19, 0x92, + 0x60, 0x8f, 0xd4, 0xd9, 0xc4, 0x2c, 0xdc, 0xdf, 0xc4, 0x54, 0x2e, 0x6a, 0xd7, 0x13, 0x0c, 0x71, + 0xaa, 0x0a, 0xfb, 0xd3, 0xb2, 0xa9, 0xca, 0xa3, 0xaf, 0xa6, 0xc6, 0x3c, 0xe1, 0xd1, 0xa7, 0x46, + 0x15, 0xc7, 0x34, 0x74, 0xa9, 0xed, 0xf8, 0x61, 0x94, 0xf4, 0xe8, 0xbb, 0xea, 0x87, 0x11, 0x66, + 0x18, 0xfb, 0x79, 0x80, 0x95, 0xbb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0x79, 0xb0, 0xf2, 0x2f, 0x0f, + 0xf6, 0x6f, 0x5b, 0x30, 0xb6, 0xba, 0x64, 0x9c, 0x5c, 0x73, 0x00, 0xfc, 0xc6, 0x73, 0xfb, 0xf6, + 0xba, 0x34, 0x87, 0x73, 0x8b, 0xa6, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x85, 0x62, 0xa3, 0xed, 0x09, + 0xf5, 0xe1, 0x20, 0x3d, 0x1e, 0xaf, 0xb7, 0x3d, 0x4c, 0x61, 0xda, 0x23, 0x88, 0x62, 0xcf, 0x8f, + 0x20, 0xba, 0x06, 0x3f, 0x40, 0xb3, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7, 0x4f, 0x4c, 0x85, 0xa9, + 0xfe, 0xf6, 0xed, 0xf2, 0x72, 0x88, 0x39, 0xdc, 0xfe, 0x62, 0x11, 0x66, 0x56, 0x1b, 0xe4, 0xee, + 0xdb, 0x7c, 0x66, 0xdb, 0xeb, 0x13, 0x8e, 0xa3, 0x29, 0x62, 0x8e, 0xfa, 0x4c, 0xa7, 0x7b, 0x7f, + 0x6c, 0xc1, 0x20, 0x77, 0x68, 0x93, 0x8f, 0x6e, 0x5f, 0xc9, 0xaa, 0x3d, 0xbf, 0x43, 0xe6, 0xb8, + 0x63, 0x9c, 0x78, 0xc3, 0xa7, 0x0e, 0x4c, 0x01, 0xc5, 0x92, 0xf9, 
0xcc, 0xcb, 0x30, 0xa2, 0x53, + 0x1e, 0xe9, 0xc1, 0xdc, 0xff, 0x53, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, 0xd3, 0x03, + 0x71, 0xdc, 0x8f, 0xa6, 0xba, 0x8f, 0xc6, 0x1b, 0xc9, 0xd1, 0xb8, 0x9c, 0x37, 0x1a, 0x27, 0x3d, + 0x06, 0xdf, 0x63, 0xc1, 0xd4, 0x6a, 0xc3, 0xaf, 0xed, 0x26, 0x1e, 0x36, 0xbd, 0x08, 0xc3, 0x74, + 0x3b, 0x0e, 0x8d, 0x37, 0xfe, 0x46, 0xd4, 0x07, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xe6, 0xcd, + 0xf2, 0x72, 0x56, 0xb0, 0x08, 0x81, 0xc2, 0x3a, 0x9d, 0xfd, 0x9b, 0x16, 0x9c, 0xbb, 0xb2, 0xb4, + 0x12, 0x4f, 0xc5, 0x54, 0xbc, 0x8a, 0x8b, 0x30, 0xd0, 0xaa, 0x6b, 0x4d, 0x89, 0xd5, 0xab, 0xcb, + 0xac, 0x15, 0x02, 0xfb, 0x4e, 0x89, 0xc5, 0x72, 0x13, 0xe0, 0x0a, 0xae, 0x2c, 0x89, 0x7d, 0x57, + 0x5a, 0x53, 0xac, 0x5c, 0x6b, 0xca, 0x13, 0x30, 0x48, 0xcf, 0x05, 0xb7, 0x26, 0xdb, 0xcd, 0x0d, + 0xb4, 0x1c, 0x84, 0x25, 0xce, 0xfe, 0x39, 0x0b, 0xa6, 0xae, 0xb8, 0x11, 0x3d, 0xb4, 0x93, 0x01, + 0x19, 0xe8, 0xa9, 0x1d, 0xba, 0x91, 0x1f, 0xec, 0x27, 0x03, 0x32, 0x60, 0x85, 0xc1, 0x1a, 0x15, + 0xff, 0xa0, 0x3d, 0x97, 0x79, 0x68, 0x17, 0x4c, 0xfb, 0x15, 0x16, 0x70, 0xac, 0x28, 0x68, 0x7f, + 0xd5, 0xdd, 0x80, 0xa9, 0xfe, 0xf6, 0xc5, 0xc6, 0xad, 0xfa, 0x6b, 0x59, 0x22, 0x70, 0x4c, 0x63, + 0xff, 0xb1, 0x05, 0xb3, 0x57, 0x1a, 0xed, 0x30, 0x22, 0xc1, 0x56, 0x98, 0xb3, 0xe9, 0x3e, 0x0f, + 0x25, 0x22, 0x15, 0xed, 0xf2, 0x29, 0x99, 0x14, 0x44, 0x95, 0x06, 0x9e, 0xc7, 0x85, 0x50, 0x74, + 0x3d, 0xbc, 0xbe, 0x3c, 0xda, 0xf3, 0xb9, 0x55, 0x40, 0x44, 0xaf, 0x4b, 0x0f, 0x94, 0xc1, 0x5e, + 0xdc, 0xaf, 0xa4, 0xb0, 0x38, 0xa3, 0x84, 0xfd, 0xe3, 0x16, 0x9c, 0x56, 0x1f, 0xfc, 0x8e, 0xfb, + 0x4c, 0xfb, 0x6b, 0x05, 0x18, 0xbd, 0xba, 0xb1, 0x51, 0xb9, 0x42, 0x22, 0x6d, 0x56, 0x76, 0x36, + 0x9f, 0x63, 0xcd, 0x0a, 0xd8, 0xe9, 0x8e, 0xd8, 0x8e, 0xdc, 0xc6, 0x1c, 0x8f, 0xb7, 0x34, 0x57, + 0xf6, 0xa2, 0x1b, 0x41, 0x35, 0x0a, 0x5c, 0x6f, 0x3b, 0x73, 0xa6, 0x4b, 0x99, 0xa5, 0x98, 0x27, + 0xb3, 0xa0, 0xe7, 0x61, 0x80, 0x05, 0x7c, 0x92, 0x83, 0xf0, 0xb0, 0xba, 0x62, 0x31, 0xe8, 0xe1, + 0xc1, 0x6c, 0xe9, 0x26, 0x2e, 0xf3, 0x3f, 0x58, 0x90, 0xa2, 0x9b, 0x30, 0xbc, 0x13, 0x45, 0xad, + 0xab, 0xc4, 0xa9, 0x93, 0x40, 0xee, 0xb2, 0xe7, 0xb3, 0x76, 0x59, 0xda, 0x09, 0x9c, 0x2c, 0xde, + 0x98, 0x62, 0x58, 0x88, 0x75, 0x3e, 0x76, 0x15, 0x20, 0xc6, 0x1d, 0x93, 0x01, 0xc4, 0xde, 0x80, + 0x12, 0xfd, 0xdc, 0x85, 0x86, 0xeb, 0x74, 0x36, 0x31, 0x3f, 0x0d, 0x25, 0x69, 0x40, 0x0e, 0xc5, + 0xeb, 0x70, 0x76, 0x22, 0x49, 0xfb, 0x72, 0x88, 0x63, 0xbc, 0xbd, 0x05, 0xa7, 0x98, 0x3b, 0xa0, + 0x13, 0xed, 0x18, 0xb3, 0xaf, 0xfb, 0x30, 0x3f, 0x23, 0x6e, 0x6c, 0xbc, 0xcd, 0xd3, 0xda, 0x73, + 0xc6, 0x11, 0xc9, 0x31, 0xbe, 0xbd, 0xd9, 0xdf, 0xec, 0x83, 0x87, 0xcb, 0xd5, 0xfc, 0x80, 0x25, + 0x2f, 0xc1, 0x08, 0x17, 0x04, 0xe9, 0xa0, 0x3b, 0x0d, 0x51, 0xaf, 0xd2, 0x6d, 0x6e, 0x68, 0x38, + 0x6c, 0x50, 0xa2, 0x73, 0x50, 0x74, 0xdf, 0xf4, 0x92, 0x8f, 0x7d, 0xca, 0xaf, 0xaf, 0x63, 0x0a, + 0xa7, 0x68, 0x2a, 0x53, 0xf2, 0xcd, 0x5a, 0xa1, 0x95, 0x5c, 0xf9, 0x2a, 0x8c, 0xb9, 0x61, 0x2d, + 0x74, 0xcb, 0x1e, 0x5d, 0x81, 0xda, 0x1a, 0x56, 0xda, 0x04, 0xda, 0x68, 0x85, 0xc5, 0x09, 0x6a, + 0xed, 0xe4, 0xe8, 0xef, 0x59, 0x2e, 0xed, 0xfa, 0x5c, 0x9a, 0x6e, 0xec, 0x2d, 0xf6, 0x75, 0x21, + 0x53, 0x52, 0x8b, 0x8d, 0x9d, 0x7f, 0x70, 0x88, 0x25, 0x8e, 0x5e, 0xd5, 0x6a, 0x3b, 0x4e, 0x6b, + 0xa1, 0x1d, 0xed, 0x2c, 0xbb, 0x61, 0xcd, 0xdf, 0x23, 0xc1, 0x3e, 0xbb, 0x65, 0x0f, 0xc5, 0x57, + 0x35, 0x85, 0x58, 0xba, 0xba, 0x50, 0xa1, 0x94, 0x38, 0x5d, 0x06, 0x2d, 0xc0, 0xb8, 0x04, 0x56, + 0x49, 0xc8, 0x36, 0xf7, 0x61, 0xc6, 0x46, 0x3d, 0xbf, 0x11, 0x60, 0xc5, 0x24, 0x49, 0x6f, 
0x8a, + 0xae, 0x70, 0x1c, 0xa2, 0xeb, 0x07, 0x60, 0xd4, 0xf5, 0xdc, 0xc8, 0x75, 0x22, 0x9f, 0x5b, 0x58, + 0xf8, 0x85, 0x9a, 0xa9, 0x8e, 0xcb, 0x3a, 0x02, 0x9b, 0x74, 0xf6, 0xbf, 0xef, 0x83, 0x49, 0x36, + 0x6c, 0xef, 0xce, 0xb0, 0xef, 0xa4, 0x19, 0x76, 0x33, 0x3d, 0xc3, 0x8e, 0x43, 0x26, 0xbf, 0xef, + 0x69, 0xf6, 0x19, 0x28, 0xa9, 0x17, 0x47, 0xf2, 0xc9, 0xa1, 0x95, 0xf3, 0xe4, 0xb0, 0xfb, 0xb9, + 0x2c, 0x9d, 0xb6, 0x8a, 0x99, 0x4e, 0x5b, 0x5f, 0xb1, 0x20, 0x36, 0x19, 0xa0, 0xd7, 0xa1, 0xd4, + 0xf2, 0x99, 0x2f, 0x62, 0x20, 0x1d, 0x7c, 0x1f, 0xef, 0x68, 0x73, 0xe0, 0x31, 0x9b, 0x02, 0xde, + 0x0b, 0x15, 0x59, 0x14, 0xc7, 0x5c, 0xd0, 0x35, 0x18, 0x6c, 0x05, 0xa4, 0x1a, 0xb1, 0x80, 0x22, + 0xbd, 0x33, 0xe4, 0xb3, 0x86, 0x17, 0xc4, 0x92, 0x83, 0xfd, 0x1f, 0x2c, 0x98, 0x48, 0x92, 0xa2, + 0x0f, 0x41, 0x1f, 0xb9, 0x4b, 0x6a, 0xa2, 0xbd, 0x99, 0x87, 0x6c, 0xac, 0x74, 0xe0, 0x1d, 0x40, + 0xff, 0x63, 0x56, 0x0a, 0x5d, 0x85, 0x41, 0x7a, 0xc2, 0x5e, 0x51, 0xc1, 0xb3, 0x1e, 0xcd, 0x3b, + 0xa5, 0x95, 0xa8, 0xc2, 0x1b, 0x27, 0x40, 0x58, 0x16, 0x67, 0x9e, 0x52, 0xb5, 0x56, 0x95, 0x5e, + 0x5e, 0xa2, 0x4e, 0x77, 0xec, 0x8d, 0xa5, 0x0a, 0x27, 0x12, 0xdc, 0xb8, 0xa7, 0x94, 0x04, 0xe2, + 0x98, 0x89, 0xfd, 0xf3, 0x16, 0x00, 0x77, 0x0c, 0x73, 0xbc, 0x6d, 0x72, 0x02, 0x7a, 0xf2, 0x65, + 0xe8, 0x0b, 0x5b, 0xa4, 0xd6, 0xc9, 0x4d, 0x36, 0x6e, 0x4f, 0xb5, 0x45, 0x6a, 0xf1, 0x8c, 0xa3, + 0xff, 0x30, 0x2b, 0x6d, 0x7f, 0x1f, 0xc0, 0x58, 0x4c, 0x56, 0x8e, 0x48, 0x13, 0x3d, 0x6b, 0x84, + 0x29, 0x38, 0x9b, 0x08, 0x53, 0x50, 0x62, 0xd4, 0x9a, 0x4a, 0xf6, 0x33, 0x50, 0x6c, 0x3a, 0x77, + 0x85, 0xce, 0xed, 0xe9, 0xce, 0xcd, 0xa0, 0xfc, 0xe7, 0xd6, 0x9c, 0xbb, 0xfc, 0x5a, 0xfa, 0xb4, + 0x5c, 0x21, 0x6b, 0xce, 0xdd, 0x43, 0xee, 0x0c, 0xcb, 0x76, 0xe9, 0xeb, 0x6e, 0x18, 0x7d, 0xfe, + 0xdf, 0xc5, 0xff, 0xd9, 0xba, 0xa3, 0x95, 0xb0, 0xba, 0x5c, 0x4f, 0xf8, 0x3c, 0xf5, 0x54, 0x97, + 0xeb, 0x25, 0xeb, 0x72, 0xbd, 0x1e, 0xea, 0x72, 0x3d, 0xf4, 0x16, 0x0c, 0x0a, 0x97, 0x44, 0x11, + 0xc8, 0x68, 0xbe, 0x87, 0xfa, 0x84, 0x47, 0x23, 0xaf, 0x73, 0x5e, 0x5e, 0xbb, 0x05, 0xb4, 0x6b, + 0xbd, 0xb2, 0x42, 0xf4, 0x17, 0x2d, 0x18, 0x13, 0xbf, 0x31, 0x79, 0xb3, 0x4d, 0xc2, 0x48, 0x88, + 0xa5, 0xef, 0xef, 0xbd, 0x0d, 0xa2, 0x20, 0x6f, 0xca, 0xfb, 0xe5, 0x39, 0x63, 0x22, 0xbb, 0xb6, + 0x28, 0xd1, 0x0a, 0xf4, 0x37, 0x2d, 0x38, 0xd5, 0x74, 0xee, 0xf2, 0x1a, 0x39, 0x0c, 0x3b, 0x91, + 0xeb, 0x0b, 0xd3, 0xfe, 0x87, 0x7a, 0x1b, 0xfe, 0x54, 0x71, 0xde, 0x48, 0x69, 0x7f, 0x3c, 0x95, + 0x45, 0xd2, 0xb5, 0xa9, 0x99, 0xed, 0x9a, 0xd9, 0x82, 0x21, 0x39, 0xdf, 0x32, 0x94, 0x1b, 0xcb, + 0xba, 0xcc, 0x7d, 0x64, 0x8f, 0x50, 0xfd, 0xf9, 0x3f, 0xad, 0x47, 0xcc, 0xb5, 0x07, 0x5a, 0xcf, + 0x67, 0x60, 0x44, 0x9f, 0x63, 0x0f, 0xb4, 0xae, 0x37, 0x61, 0x2a, 0x63, 0x2e, 0x3d, 0xd0, 0x2a, + 0xef, 0xc0, 0xd9, 0xdc, 0xf9, 0xf1, 0x20, 0x2b, 0xb6, 0xbf, 0x66, 0xe9, 0xfb, 0xe0, 0x09, 0x18, + 0x2b, 0x96, 0x4c, 0x63, 0xc5, 0xf9, 0xce, 0x2b, 0x27, 0xc7, 0x62, 0xf1, 0x86, 0xde, 0x68, 0xba, + 0xab, 0xa3, 0xd7, 0x60, 0xa0, 0x41, 0x21, 0xd2, 0xb1, 0xd5, 0xee, 0xbe, 0x22, 0x63, 0x61, 0x92, + 0xc1, 0x43, 0x2c, 0x38, 0xd8, 0xbf, 0x64, 0x41, 0xdf, 0x09, 0xf4, 0x04, 0x36, 0x7b, 0xe2, 0xd9, + 0x5c, 0xd6, 0x22, 0xa6, 0xf3, 0x1c, 0x76, 0xee, 0xac, 0xdc, 0x8d, 0x88, 0x17, 0xb2, 0x13, 0x39, + 0xb3, 0x63, 0x7e, 0xda, 0x82, 0xa9, 0xeb, 0xbe, 0x53, 0x5f, 0x74, 0x1a, 0x8e, 0x57, 0x23, 0x41, + 0xd9, 0xdb, 0x3e, 0x92, 0x57, 0x76, 0xa1, 0xab, 0x57, 0xf6, 0x92, 0x74, 0x6a, 0xea, 0xcb, 0x1f, + 0x3f, 0x2a, 0x49, 0x27, 0x03, 0xb7, 0x18, 0xee, 0xb7, 0x3b, 0x80, 0xf4, 0x56, 0x8a, 0x37, 0x32, + 0x18, 0x06, 0x5d, 
0xde, 0x5e, 0x31, 0x88, 0x4f, 0x66, 0x4b, 0xb8, 0xa9, 0xcf, 0xd3, 0x5e, 0x7f, + 0x70, 0x00, 0x96, 0x8c, 0xec, 0x97, 0x20, 0xf3, 0xa1, 0x7d, 0x77, 0xbd, 0x84, 0xfd, 0x31, 0x98, + 0x64, 0x25, 0x8f, 0xa8, 0x19, 0xb0, 0x13, 0xda, 0xd4, 0x8c, 0xa0, 0x81, 0xf6, 0x17, 0x2c, 0x18, + 0x5f, 0x4f, 0xc4, 0x52, 0xbb, 0xc8, 0xec, 0xaf, 0x19, 0x4a, 0xfc, 0x2a, 0x83, 0x62, 0x81, 0x3d, + 0x76, 0x25, 0xd7, 0x9f, 0x5b, 0x10, 0xc7, 0xbe, 0x38, 0x01, 0xf1, 0x6d, 0xc9, 0x10, 0xdf, 0x32, + 0x05, 0x59, 0xd5, 0x9c, 0x3c, 0xe9, 0x0d, 0x5d, 0x53, 0x51, 0xa1, 0x3a, 0xc8, 0xb0, 0x31, 0x1b, + 0x3e, 0x15, 0xc7, 0xcc, 0xd0, 0x51, 0x32, 0x4e, 0x94, 0xfd, 0x3b, 0x05, 0x40, 0x8a, 0xb6, 0xe7, + 0xa8, 0x55, 0xe9, 0x12, 0xc7, 0x13, 0xb5, 0x6a, 0x0f, 0x10, 0xf3, 0x20, 0x08, 0x1c, 0x2f, 0xe4, + 0x6c, 0x5d, 0xa1, 0xd6, 0x3b, 0x9a, 0x7b, 0xc2, 0x8c, 0xa8, 0x12, 0x5d, 0x4f, 0x71, 0xc3, 0x19, + 0x35, 0x68, 0x9e, 0x21, 0xfd, 0xbd, 0x7a, 0x86, 0x0c, 0x74, 0x79, 0x07, 0xf7, 0x55, 0x0b, 0x46, + 0x55, 0x37, 0xbd, 0x43, 0xbc, 0xd4, 0x55, 0x7b, 0x72, 0x36, 0xd0, 0x8a, 0xd6, 0x64, 0x76, 0xb0, + 0x7c, 0x17, 0x7b, 0xcf, 0xe8, 0x34, 0xdc, 0xb7, 0x88, 0x8a, 0x72, 0x38, 0x2b, 0xde, 0x27, 0x0a, + 0xe8, 0xe1, 0xc1, 0xec, 0xa8, 0xfa, 0xc7, 0xa3, 0x38, 0xc7, 0x45, 0xe8, 0x96, 0x3c, 0x9e, 0x98, + 0x8a, 0xe8, 0x45, 0xe8, 0x6f, 0xed, 0x38, 0x21, 0x49, 0xbc, 0xe6, 0xe9, 0xaf, 0x50, 0xe0, 0xe1, + 0xc1, 0xec, 0x98, 0x2a, 0xc0, 0x20, 0x98, 0x53, 0xf7, 0x1e, 0x0b, 0x2c, 0x3d, 0x39, 0xbb, 0xc6, + 0x02, 0xfb, 0x53, 0x0b, 0xfa, 0xd6, 0xfd, 0xfa, 0x49, 0x6c, 0x01, 0xaf, 0x1a, 0x5b, 0xc0, 0x23, + 0x79, 0x01, 0xf6, 0x73, 0x57, 0xff, 0x6a, 0x62, 0xf5, 0x9f, 0xcf, 0xe5, 0xd0, 0x79, 0xe1, 0x37, + 0x61, 0x98, 0x85, 0xed, 0x17, 0x2f, 0x97, 0x9e, 0x37, 0x16, 0xfc, 0x6c, 0x62, 0xc1, 0x8f, 0x6b, + 0xa4, 0xda, 0x4a, 0x7f, 0x0a, 0x06, 0xc5, 0x53, 0x98, 0xe4, 0xb3, 0x50, 0x41, 0x8b, 0x25, 0xde, + 0xfe, 0x89, 0x22, 0x18, 0x69, 0x02, 0xd0, 0xaf, 0x58, 0x30, 0x17, 0x70, 0x17, 0xd9, 0xfa, 0x72, + 0x3b, 0x70, 0xbd, 0xed, 0x6a, 0x6d, 0x87, 0xd4, 0xdb, 0x0d, 0xd7, 0xdb, 0x2e, 0x6f, 0x7b, 0xbe, + 0x02, 0xaf, 0xdc, 0x25, 0xb5, 0x36, 0x33, 0xbb, 0x75, 0xc9, 0x49, 0xa0, 0x5c, 0xcd, 0x9f, 0xbb, + 0x77, 0x30, 0x3b, 0x87, 0x8f, 0xc4, 0x1b, 0x1f, 0xb1, 0x2d, 0xe8, 0x37, 0x2d, 0x98, 0xe7, 0xd1, + 0xf3, 0x7b, 0x6f, 0x7f, 0x87, 0xdb, 0x72, 0x45, 0xb2, 0x8a, 0x99, 0x6c, 0x90, 0xa0, 0xb9, 0xf8, + 0x01, 0xd1, 0xa1, 0xf3, 0x95, 0xa3, 0xd5, 0x85, 0x8f, 0xda, 0x38, 0xfb, 0x1f, 0x14, 0x61, 0x54, + 0xc4, 0x8c, 0x12, 0x67, 0xc0, 0x8b, 0xc6, 0x94, 0x78, 0x34, 0x31, 0x25, 0x26, 0x0d, 0xe2, 0xe3, + 0xd9, 0xfe, 0x43, 0x98, 0xa4, 0x9b, 0xf3, 0x55, 0xe2, 0x04, 0xd1, 0x26, 0x71, 0xb8, 0xc3, 0x57, + 0xf1, 0xc8, 0xbb, 0xbf, 0xd2, 0x4f, 0x5e, 0x4f, 0x32, 0xc3, 0x69, 0xfe, 0xdf, 0x49, 0x67, 0x8e, + 0x07, 0x13, 0xa9, 0xb0, 0x5f, 0x1f, 0x87, 0x92, 0x7a, 0xc7, 0x21, 0x36, 0x9d, 0xce, 0xd1, 0xf3, + 0x92, 0x1c, 0xb8, 0xfa, 0x2b, 0x7e, 0x43, 0x14, 0xb3, 0xb3, 0xff, 0x76, 0xc1, 0xa8, 0x90, 0x0f, + 0xe2, 0x3a, 0x0c, 0x39, 0x61, 0xe8, 0x6e, 0x7b, 0xa4, 0xde, 0x49, 0x43, 0x99, 0xaa, 0x86, 0xbd, + 0xa5, 0x59, 0x10, 0x25, 0xb1, 0xe2, 0x81, 0xae, 0x72, 0xb7, 0xba, 0x3d, 0xd2, 0x49, 0x3d, 0x99, + 0xe2, 0x06, 0xd2, 0xf1, 0x6e, 0x8f, 0x60, 0x51, 0x1e, 0x7d, 0x92, 0xfb, 0x3d, 0x5e, 0xf3, 0xfc, + 0x3b, 0xde, 0x15, 0xdf, 0x97, 0x71, 0x19, 0x7a, 0x63, 0x38, 0x29, 0xbd, 0x1d, 0x55, 0x71, 0x6c, + 0x72, 0xeb, 0x2d, 0x8e, 0xe6, 0x67, 0x81, 0x45, 0x0b, 0x37, 0x9f, 0x4d, 0x87, 0x88, 0xc0, 0xb8, + 0x08, 0x48, 0x26, 0x61, 0xa2, 0xef, 0x32, 0xaf, 0x72, 0x66, 0xe9, 0x58, 0x91, 0x7e, 0xcd, 0x64, + 0x81, 0x93, 0x3c, 0xed, 0x9f, 0xb5, 0x80, 
0x3d, 0x21, 0x3d, 0x01, 0x79, 0xe4, 0xc3, 0xa6, 0x3c, + 0x32, 0x9d, 0xd7, 0xc9, 0x39, 0xa2, 0xc8, 0x0b, 0x7c, 0x66, 0x55, 0x02, 0xff, 0xee, 0xbe, 0x70, + 0x56, 0xe9, 0x7e, 0xff, 0xb0, 0xff, 0xa7, 0xc5, 0x37, 0x31, 0xf5, 0xca, 0x02, 0x7d, 0x0e, 0x86, + 0x6a, 0x4e, 0xcb, 0xa9, 0xf1, 0x9c, 0x36, 0xb9, 0x1a, 0x3d, 0xa3, 0xd0, 0xdc, 0x92, 0x28, 0xc1, + 0x35, 0x54, 0x32, 0xb0, 0xdd, 0x90, 0x04, 0x77, 0xd5, 0x4a, 0xa9, 0x2a, 0x67, 0x76, 0x61, 0xd4, + 0x60, 0xf6, 0x40, 0xd5, 0x19, 0x9f, 0xe3, 0x47, 0xac, 0x0a, 0xc4, 0xd8, 0x84, 0x49, 0x4f, 0xfb, + 0x4f, 0x0f, 0x14, 0x79, 0xb9, 0x7c, 0xbc, 0xdb, 0x21, 0xca, 0x4e, 0x1f, 0xed, 0x75, 0x6a, 0x82, + 0x0d, 0x4e, 0x73, 0xb6, 0x7f, 0xd2, 0x82, 0x87, 0x74, 0x42, 0xed, 0x01, 0x4c, 0x37, 0x23, 0xc9, + 0x32, 0x0c, 0xf9, 0x2d, 0x12, 0x38, 0x91, 0x1f, 0x88, 0x53, 0xe3, 0x92, 0xec, 0xf4, 0x1b, 0x02, + 0x7e, 0x28, 0x22, 0xb4, 0x4b, 0xee, 0x12, 0x8e, 0x55, 0x49, 0x7a, 0xfb, 0x64, 0x9d, 0x11, 0x8a, + 0xa7, 0x4e, 0x6c, 0x0f, 0x60, 0x96, 0xf4, 0x10, 0x0b, 0x8c, 0xfd, 0x4d, 0x8b, 0x4f, 0x2c, 0xbd, + 0xe9, 0xe8, 0x4d, 0x98, 0x68, 0x3a, 0x51, 0x6d, 0x67, 0xe5, 0x6e, 0x2b, 0xe0, 0x26, 0x27, 0xd9, + 0x4f, 0x4f, 0x77, 0xeb, 0x27, 0xed, 0x23, 0x63, 0x57, 0xce, 0xb5, 0x04, 0x33, 0x9c, 0x62, 0x8f, + 0x36, 0x61, 0x98, 0xc1, 0xd8, 0x2b, 0xbe, 0xb0, 0x93, 0x68, 0x90, 0x57, 0x9b, 0x72, 0x46, 0x58, + 0x8b, 0xf9, 0x60, 0x9d, 0xa9, 0xfd, 0x95, 0x22, 0x5f, 0xed, 0x4c, 0x94, 0x7f, 0x0a, 0x06, 0x5b, + 0x7e, 0x7d, 0xa9, 0xbc, 0x8c, 0xc5, 0x28, 0xa8, 0x63, 0xa4, 0xc2, 0xc1, 0x58, 0xe2, 0xd1, 0x25, + 0x18, 0x12, 0x3f, 0xa5, 0x89, 0x90, 0xed, 0xcd, 0x82, 0x2e, 0xc4, 0x0a, 0x8b, 0x9e, 0x03, 0x68, + 0x05, 0xfe, 0x9e, 0x5b, 0x67, 0xd1, 0x25, 0x8a, 0xa6, 0x1f, 0x51, 0x45, 0x61, 0xb0, 0x46, 0x85, + 0x5e, 0x81, 0xd1, 0xb6, 0x17, 0x72, 0x71, 0x44, 0x8b, 0x25, 0xab, 0x3c, 0x5c, 0x6e, 0xea, 0x48, + 0x6c, 0xd2, 0xa2, 0x05, 0x18, 0x88, 0x1c, 0xe6, 0x17, 0xd3, 0x9f, 0xef, 0xee, 0xbb, 0x41, 0x29, + 0xf4, 0xf4, 0x29, 0xb4, 0x00, 0x16, 0x05, 0xd1, 0xc7, 0xe5, 0x83, 0x5a, 0xbe, 0xb1, 0x0b, 0x3f, + 0xfb, 0xde, 0x0e, 0x01, 0xed, 0x39, 0xad, 0xf0, 0xdf, 0x37, 0x78, 0xa1, 0x97, 0x01, 0xc8, 0xdd, + 0x88, 0x04, 0x9e, 0xd3, 0x50, 0xde, 0x6c, 0x4a, 0x2e, 0x58, 0xf6, 0xd7, 0xfd, 0xe8, 0x66, 0x48, + 0x56, 0x14, 0x05, 0xd6, 0xa8, 0xed, 0xdf, 0x2c, 0x01, 0xc4, 0x72, 0x3b, 0x7a, 0x2b, 0xb5, 0x71, + 0x3d, 0xd3, 0x59, 0xd2, 0x3f, 0xbe, 0x5d, 0x0b, 0x7d, 0xbf, 0x05, 0xc3, 0x4e, 0xa3, 0xe1, 0xd7, + 0x1c, 0x1e, 0xed, 0xb7, 0xd0, 0x79, 0xe3, 0x14, 0xf5, 0x2f, 0xc4, 0x25, 0x78, 0x13, 0x9e, 0x97, + 0x33, 0x54, 0xc3, 0x74, 0x6d, 0x85, 0x5e, 0x31, 0x7a, 0x9f, 0xbc, 0x2a, 0x16, 0x8d, 0xae, 0x54, + 0x57, 0xc5, 0x12, 0x3b, 0x23, 0xf4, 0x5b, 0xe2, 0x4d, 0xe3, 0x96, 0xd8, 0x97, 0xff, 0x62, 0xd0, + 0x10, 0x5f, 0xbb, 0x5d, 0x10, 0x51, 0x45, 0x8f, 0x1e, 0xd0, 0x9f, 0xff, 0x3c, 0x4f, 0xbb, 0x27, + 0x75, 0x89, 0x1c, 0xf0, 0x19, 0x18, 0xaf, 0x9b, 0x42, 0x80, 0x98, 0x89, 0x4f, 0xe6, 0xf1, 0x4d, + 0xc8, 0x0c, 0xf1, 0xb1, 0x9f, 0x40, 0xe0, 0x24, 0x63, 0x54, 0xe1, 0xc1, 0x24, 0xca, 0xde, 0x96, + 0x2f, 0xde, 0x7a, 0xd8, 0xb9, 0x63, 0xb9, 0x1f, 0x46, 0xa4, 0x49, 0x29, 0xe3, 0xd3, 0x7d, 0x5d, + 0x94, 0xc5, 0x8a, 0x0b, 0x7a, 0x0d, 0x06, 0xd8, 0xfb, 0xac, 0x70, 0x7a, 0x28, 0x5f, 0xe3, 0x6c, + 0x46, 0x47, 0x8b, 0x17, 0x24, 0xfb, 0x1b, 0x62, 0xc1, 0x01, 0x5d, 0x95, 0xaf, 0x1f, 0xc3, 0xb2, + 0x77, 0x33, 0x24, 0xec, 0xf5, 0x63, 0x69, 0xf1, 0xf1, 0xf8, 0x61, 0x23, 0x87, 0x67, 0x26, 0x59, + 0x33, 0x4a, 0x52, 0x29, 0x4a, 0xfc, 0x97, 0xb9, 0xdb, 0xa6, 0x21, 0xbf, 0x79, 0x66, 0x7e, 0xb7, + 0xb8, 0x3b, 0x6f, 0x99, 0x2c, 0x70, 0x92, 0x27, 0x95, 0x48, 0xf9, 
0xaa, 0x17, 0xaf, 0x45, 0xba, + 0xed, 0x1d, 0xfc, 0x22, 0xce, 0x4e, 0x23, 0x0e, 0xc1, 0xa2, 0xfc, 0x89, 0x8a, 0x07, 0x33, 0x1e, + 0x4c, 0x24, 0x97, 0xe8, 0x03, 0x15, 0x47, 0xfe, 0xb0, 0x0f, 0xc6, 0xcc, 0x29, 0x85, 0xe6, 0xa1, + 0x24, 0x98, 0xa8, 0xfc, 0x07, 0x6a, 0x95, 0xac, 0x49, 0x04, 0x8e, 0x69, 0x58, 0xda, 0x0b, 0x56, + 0x5c, 0x73, 0x0f, 0x8e, 0xd3, 0x5e, 0x28, 0x0c, 0xd6, 0xa8, 0xe8, 0xc5, 0x6a, 0xd3, 0xf7, 0x23, + 0x75, 0x20, 0xa9, 0x79, 0xb7, 0xc8, 0xa0, 0x58, 0x60, 0xe9, 0x41, 0xb4, 0x4b, 0x02, 0x8f, 0x34, + 0xcc, 0xb8, 0xc3, 0xea, 0x20, 0xba, 0xa6, 0x23, 0xb1, 0x49, 0x4b, 0x8f, 0x53, 0x3f, 0x64, 0x13, + 0x59, 0x5c, 0xdf, 0x62, 0x77, 0xeb, 0x2a, 0x7f, 0x80, 0x2d, 0xf1, 0xe8, 0x63, 0xf0, 0x90, 0x8a, + 0xad, 0x84, 0xb9, 0x35, 0x43, 0xd6, 0x38, 0x60, 0x68, 0x5b, 0x1e, 0x5a, 0xca, 0x26, 0xc3, 0x79, + 0xe5, 0xd1, 0xab, 0x30, 0x26, 0x44, 0x7c, 0xc9, 0x71, 0xd0, 0xf4, 0x30, 0xba, 0x66, 0x60, 0x71, + 0x82, 0x5a, 0x46, 0x4e, 0x66, 0x52, 0xb6, 0xe4, 0x30, 0x94, 0x8e, 0x9c, 0xac, 0xe3, 0x71, 0xaa, + 0x04, 0x5a, 0x80, 0x71, 0x2e, 0x83, 0xb9, 0xde, 0x36, 0x1f, 0x13, 0xf1, 0x98, 0x4b, 0x2d, 0xa9, + 0x1b, 0x26, 0x1a, 0x27, 0xe9, 0xd1, 0x4b, 0x30, 0xe2, 0x04, 0xb5, 0x1d, 0x37, 0x22, 0xb5, 0xa8, + 0x1d, 0xf0, 0x57, 0x5e, 0x9a, 0x8b, 0xd6, 0x82, 0x86, 0xc3, 0x06, 0xa5, 0xfd, 0x16, 0x4c, 0x65, + 0x44, 0x66, 0xa0, 0x13, 0xc7, 0x69, 0xb9, 0xf2, 0x9b, 0x12, 0x1e, 0xce, 0x0b, 0x95, 0xb2, 0xfc, + 0x1a, 0x8d, 0x8a, 0xce, 0x4e, 0x16, 0xc1, 0x41, 0x4b, 0xd5, 0xa8, 0x66, 0xe7, 0xaa, 0x44, 0xe0, + 0x98, 0xc6, 0xfe, 0xcf, 0x05, 0x18, 0xcf, 0xb0, 0xad, 0xb0, 0x74, 0x81, 0x89, 0x4b, 0x4a, 0x9c, + 0x1d, 0xd0, 0x0c, 0xc4, 0x5d, 0x38, 0x42, 0x20, 0xee, 0x62, 0xb7, 0x40, 0xdc, 0x7d, 0x6f, 0x27, + 0x10, 0xb7, 0xd9, 0x63, 0xfd, 0x3d, 0xf5, 0x58, 0x46, 0xf0, 0xee, 0x81, 0x23, 0x06, 0xef, 0x36, + 0x3a, 0x7d, 0xb0, 0x87, 0x4e, 0xff, 0xd1, 0x02, 0x4c, 0x24, 0x5d, 0x49, 0x4f, 0x40, 0x6f, 0xfb, + 0x9a, 0xa1, 0xb7, 0xbd, 0xd4, 0xcb, 0xe3, 0xdb, 0x5c, 0x1d, 0x2e, 0x4e, 0xe8, 0x70, 0xdf, 0xdb, + 0x13, 0xb7, 0xce, 0xfa, 0xdc, 0x9f, 0x2a, 0xc0, 0xe9, 0xcc, 0xd7, 0xbf, 0x27, 0xd0, 0x37, 0x37, + 0x8c, 0xbe, 0x79, 0xb6, 0xe7, 0x87, 0xc9, 0xb9, 0x1d, 0x74, 0x3b, 0xd1, 0x41, 0xf3, 0xbd, 0xb3, + 0xec, 0xdc, 0x4b, 0xdf, 0x28, 0xc2, 0xf9, 0xcc, 0x72, 0xb1, 0xda, 0x73, 0xd5, 0x50, 0x7b, 0x3e, + 0x97, 0x50, 0x7b, 0xda, 0x9d, 0x4b, 0x1f, 0x8f, 0x1e, 0x54, 0x3c, 0xd0, 0x65, 0x61, 0x06, 0xee, + 0x53, 0x07, 0x6a, 0x3c, 0xd0, 0x55, 0x8c, 0xb0, 0xc9, 0xf7, 0x3b, 0x49, 0xf7, 0xf9, 0xcf, 0x2d, + 0x38, 0x9b, 0x39, 0x36, 0x27, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, 0xf5, 0x3c, 0x5b, 0x73, + 0x94, 0x5f, 0x3f, 0xd3, 0x9f, 0xf3, 0x2d, 0xec, 0x26, 0x7f, 0x03, 0x86, 0x9d, 0x5a, 0x8d, 0x84, + 0xe1, 0x9a, 0x5f, 0x57, 0xb1, 0x86, 0x9f, 0x65, 0xf7, 0xac, 0x18, 0x7c, 0x78, 0x30, 0x3b, 0x93, + 0x64, 0x11, 0xa3, 0xb1, 0xce, 0x01, 0x7d, 0x12, 0x86, 0x42, 0x71, 0x6e, 0x8a, 0xb1, 0x7f, 0xbe, + 0xc7, 0xce, 0x71, 0x36, 0x49, 0xc3, 0x0c, 0x86, 0xa4, 0x34, 0x15, 0x8a, 0xa5, 0x19, 0x38, 0xa5, + 0x70, 0xac, 0x81, 0x53, 0x9e, 0x03, 0xd8, 0x53, 0x97, 0x81, 0xa4, 0xfe, 0x41, 0xbb, 0x26, 0x68, + 0x54, 0xe8, 0x23, 0x30, 0x11, 0xf2, 0x68, 0x81, 0x4b, 0x0d, 0x27, 0x64, 0xef, 0x68, 0xc4, 0x2c, + 0x64, 0x01, 0x97, 0xaa, 0x09, 0x1c, 0x4e, 0x51, 0xa3, 0x55, 0x59, 0x2b, 0x0b, 0x6d, 0xc8, 0x27, + 0xe6, 0xc5, 0xb8, 0x46, 0x91, 0xac, 0xf8, 0x54, 0xb2, 0xfb, 0x59, 0xc7, 0x6b, 0x25, 0xd1, 0x27, + 0x01, 0xe8, 0xf4, 0x11, 0x7a, 0x88, 0xc1, 0xfc, 0xcd, 0x93, 0xee, 0x2a, 0xf5, 0x4c, 0xe7, 0x66, + 0xf6, 0xa6, 0x76, 0x59, 0x31, 0xc1, 0x1a, 0x43, 0xe4, 0xc0, 0x68, 0xfc, 0x2f, 0xce, 0xe5, 
0x79, + 0x29, 0xb7, 0x86, 0x24, 0x73, 0xa6, 0xf2, 0x5e, 0xd6, 0x59, 0x60, 0x93, 0xa3, 0xfd, 0xe3, 0x83, + 0xf0, 0x70, 0x87, 0x6d, 0x18, 0x2d, 0x98, 0xa6, 0xde, 0xa7, 0x93, 0xf7, 0xf7, 0x99, 0xcc, 0xc2, + 0xc6, 0x85, 0x3e, 0x31, 0xdb, 0x0b, 0x6f, 0x7b, 0xb6, 0xff, 0xb0, 0xa5, 0x69, 0x56, 0xb8, 0x53, + 0xe9, 0x87, 0x8f, 0x78, 0xbc, 0x1c, 0xa3, 0xaa, 0x65, 0x2b, 0x43, 0x5f, 0xf1, 0x5c, 0xcf, 0xcd, + 0xe9, 0x5d, 0x81, 0xf1, 0x35, 0x0b, 0x90, 0xd0, 0xac, 0x90, 0xba, 0x5a, 0x4b, 0x42, 0x95, 0x71, + 0xe5, 0xa8, 0xdf, 0xbf, 0x90, 0xe2, 0xc4, 0x7b, 0xe2, 0x65, 0x79, 0x0e, 0xa4, 0x09, 0xba, 0xf6, + 0x49, 0x46, 0xf3, 0xd0, 0xc7, 0x58, 0x20, 0x5d, 0xf7, 0x2d, 0x21, 0xfc, 0x88, 0xb5, 0xf6, 0xa2, + 0x08, 0xa2, 0xab, 0xe0, 0x54, 0xca, 0xcd, 0x6c, 0xae, 0x4e, 0x84, 0x0d, 0x56, 0x27, 0x7b, 0xf5, + 0x6e, 0xc3, 0x43, 0x39, 0x5d, 0xf6, 0x40, 0x6f, 0xe0, 0xbf, 0x6d, 0xc1, 0xb9, 0x8e, 0x11, 0x61, + 0xbe, 0x0d, 0x65, 0x43, 0xfb, 0xf3, 0x16, 0x64, 0x0f, 0xb6, 0xe1, 0x51, 0x36, 0x0f, 0xa5, 0x5a, + 0x22, 0xeb, 0x60, 0x1c, 0x1b, 0x41, 0x65, 0x1c, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, + 0xb1, 0x5f, 0xb3, 0x20, 0xb5, 0xbf, 0x9f, 0x80, 0xa0, 0x51, 0x36, 0x05, 0x8d, 0xc7, 0x7b, 0xe9, + 0xcd, 0x1c, 0x19, 0xe3, 0x4f, 0xc6, 0xe1, 0x4c, 0xce, 0x8b, 0xbc, 0x3d, 0x98, 0xdc, 0xae, 0x11, + 0xf3, 0x71, 0x75, 0xa7, 0xa0, 0x43, 0x1d, 0x5f, 0x62, 0xf3, 0x64, 0x8f, 0x29, 0x12, 0x9c, 0xae, + 0x02, 0x7d, 0xde, 0x82, 0x53, 0xce, 0x9d, 0x70, 0x85, 0x0a, 0x8c, 0x6e, 0x6d, 0xb1, 0xe1, 0xd7, + 0x76, 0xe9, 0x69, 0x2c, 0x17, 0xc2, 0x0b, 0x99, 0x4a, 0xbc, 0xdb, 0xd5, 0x14, 0xbd, 0x51, 0x3d, + 0x4b, 0xed, 0x9b, 0x45, 0x85, 0x33, 0xeb, 0x42, 0x58, 0x64, 0x4f, 0xa0, 0xd7, 0xd1, 0x0e, 0xcf, + 0xff, 0xb3, 0x9e, 0x4e, 0x72, 0x09, 0x48, 0x62, 0xb0, 0xe2, 0x83, 0x3e, 0x0d, 0xa5, 0x6d, 0xf9, + 0xd2, 0x37, 0x43, 0xc2, 0x8a, 0x3b, 0xb2, 0xf3, 0xfb, 0x67, 0x6e, 0x89, 0x57, 0x44, 0x38, 0x66, + 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x15, 0x76, 0xca, 0x8e, 0x9b, 0x70, 0xb9, 0xe4, 0x41, 0x36, 0xd6, + 0x57, 0xab, 0x98, 0x16, 0x44, 0x57, 0xa1, 0x18, 0x6c, 0xd6, 0x85, 0x06, 0x3a, 0x73, 0x91, 0xe2, + 0xc5, 0xe5, 0x9c, 0x56, 0x31, 0x4e, 0x78, 0x71, 0x19, 0x53, 0x16, 0xa8, 0x02, 0xfd, 0xec, 0x19, + 0x9b, 0x90, 0x67, 0x32, 0x6f, 0x6e, 0x1d, 0x9e, 0x83, 0xf2, 0x48, 0x1c, 0x8c, 0x00, 0x73, 0x46, + 0x68, 0x03, 0x06, 0x6a, 0x2c, 0x93, 0xaa, 0x10, 0x60, 0xde, 0x97, 0xa9, 0x6b, 0xee, 0x90, 0x62, + 0x56, 0xa8, 0x5e, 0x19, 0x05, 0x16, 0xbc, 0x18, 0x57, 0xd2, 0xda, 0xd9, 0x0a, 0x45, 0xa6, 0xf1, + 0x6c, 0xae, 0x1d, 0x32, 0x27, 0x0b, 0xae, 0x8c, 0x02, 0x0b, 0x5e, 0xe8, 0x65, 0x28, 0x6c, 0xd5, + 0xc4, 0x13, 0xb5, 0x4c, 0xa5, 0xb3, 0x19, 0x27, 0x65, 0x71, 0xe0, 0xde, 0xc1, 0x6c, 0x61, 0x75, + 0x09, 0x17, 0xb6, 0x6a, 0x68, 0x1d, 0x06, 0xb7, 0x78, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, + 0xe8, 0x43, 0x2a, 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x08, 0x54, 0x84, + 0x08, 0x11, 0xa0, 0x6e, 0xee, 0x68, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, + 0x3a, 0xab, 0x9d, 0xb7, 0xda, 0x01, 0x8b, 0x02, 0x2e, 0x22, 0x19, 0x65, 0xce, 0xea, 0x05, 0x49, + 0xd4, 0x69, 0x56, 0x2b, 0x22, 0x1c, 0x33, 0x45, 0xbb, 0x30, 0xba, 0x17, 0xb6, 0x76, 0x88, 0x5c, + 0xd2, 0x2c, 0xb0, 0x51, 0x8e, 0x7c, 0x74, 0x4b, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, + 0x88, 0xc9, 0xb2, 0xb7, 0x74, 0x66, 0xd8, 0xe4, 0x4d, 0xbb, 0xff, 0xcd, 0xb6, 0xbf, 0xb9, 0x1f, + 0x11, 0x11, 0x57, 0x2e, 0xb3, 0xfb, 0x5f, 0xe7, 0x24, 0xe9, 0xee, 0x17, 0x08, 0x2c, 0x99, 0xa0, + 0x5b, 0xa2, 0x7b, 0xd8, 0xee, 0x39, 0x91, 0x1f, 0xfc, 0x75, 0x41, 0x12, 0xe5, 0x74, 0x0a, 0xdb, + 0x2d, 0x63, 0x56, 
0x6c, 0x97, 0x6c, 0xed, 0xf8, 0x91, 0xef, 0x25, 0x76, 0xe8, 0xc9, 0xfc, 0x5d, + 0xb2, 0x92, 0x41, 0x9f, 0xde, 0x25, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0xa8, 0x0e, 0x63, 0x2d, 0x3f, + 0x88, 0xee, 0xf8, 0x81, 0x9c, 0x5f, 0xa8, 0x83, 0x5e, 0xcc, 0xa0, 0x14, 0x35, 0xb2, 0x90, 0x8d, + 0x26, 0x06, 0x27, 0x78, 0xa2, 0x8f, 0xc2, 0x60, 0x58, 0x73, 0x1a, 0xa4, 0x7c, 0x63, 0x7a, 0x2a, + 0xff, 0xf8, 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x55, + 0xe8, 0x67, 0xc9, 0xe6, 0x58, 0x10, 0xc4, 0x9c, 0x18, 0xb6, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, + 0x30, 0xe6, 0xc5, 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x9d, 0xbf, 0x06, 0xc4, 0xad, + 0xf2, 0x46, 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xe9, + 0xe0, 0xb9, 0x95, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0xfb, 0x83, 0x69, + 0x99, 0x85, 0x29, 0x14, 0xbe, 0xd7, 0x4a, 0xd9, 0x9a, 0xdf, 0xdf, 0xab, 0x7e, 0xf3, 0x18, 0xaf, + 0x42, 0x9f, 0xb7, 0xe0, 0x4c, 0x2b, 0xf3, 0x43, 0x84, 0x00, 0xd0, 0x9b, 0x9a, 0x94, 0x7f, 0xba, + 0x0a, 0x98, 0x99, 0x8d, 0xc7, 0x39, 0x35, 0x25, 0xaf, 0x9b, 0xc5, 0xb7, 0x7d, 0xdd, 0x5c, 0x83, + 0xa1, 0x1a, 0xbf, 0x8a, 0x74, 0xcc, 0x2c, 0x9e, 0xbc, 0x7b, 0x33, 0x51, 0x42, 0xdc, 0x61, 0xb6, + 0xb0, 0x62, 0x81, 0x7e, 0xc4, 0x82, 0x73, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 0xca, 0x26, 0xd7, + 0x65, 0xac, 0x8a, 0xef, 0x4f, 0xc9, 0xff, 0x06, 0xf1, 0x61, 0x37, 0x02, 0xdc, 0xb9, 0x32, 0xb4, + 0x9c, 0xa1, 0x4c, 0x19, 0x30, 0x0d, 0x48, 0x3d, 0x28, 0x54, 0x5e, 0x80, 0x91, 0xa6, 0xdf, 0xf6, + 0x22, 0xe1, 0xe8, 0x25, 0x9c, 0x4e, 0x98, 0xb3, 0xc5, 0x9a, 0x06, 0xc7, 0x06, 0x55, 0x42, 0x0d, + 0x33, 0x74, 0xdf, 0x6a, 0x98, 0x37, 0x60, 0xc4, 0xd3, 0x3c, 0x93, 0x85, 0x3c, 0x70, 0x31, 0x3f, + 0x42, 0xae, 0xee, 0xc7, 0xcc, 0x5b, 0xa9, 0x43, 0xb0, 0xc1, 0xed, 0x64, 0x3d, 0xc0, 0xbe, 0x6c, + 0x65, 0x08, 0xf5, 0x5c, 0x15, 0xf3, 0x21, 0x53, 0x15, 0x73, 0x31, 0xa9, 0x8a, 0x49, 0x19, 0x0f, + 0x0c, 0x2d, 0x4c, 0xef, 0x09, 0x80, 0x7a, 0x8d, 0xb2, 0x69, 0x37, 0xe0, 0x42, 0xb7, 0x63, 0x89, + 0x79, 0xfc, 0xd5, 0x95, 0xa9, 0x38, 0xf6, 0xf8, 0xab, 0x97, 0x97, 0x31, 0xc3, 0xf4, 0x1a, 0xbf, + 0xc9, 0xfe, 0x8f, 0x16, 0x14, 0x2b, 0x7e, 0xfd, 0x04, 0x2e, 0xbc, 0x1f, 0x36, 0x2e, 0xbc, 0x0f, + 0x67, 0x1f, 0x88, 0xf5, 0x5c, 0xd3, 0xc7, 0x4a, 0xc2, 0xf4, 0x71, 0x2e, 0x8f, 0x41, 0x67, 0x43, + 0xc7, 0x4f, 0x17, 0x61, 0xb8, 0xe2, 0xd7, 0x95, 0xbb, 0xfd, 0x3f, 0xba, 0x1f, 0x77, 0xfb, 0xdc, + 0x34, 0x16, 0x1a, 0x67, 0xe6, 0x28, 0x28, 0x5f, 0x1a, 0x7f, 0x9b, 0x79, 0xdd, 0xdf, 0x26, 0xee, + 0xf6, 0x4e, 0x44, 0xea, 0xc9, 0xcf, 0x39, 0x39, 0xaf, 0xfb, 0xdf, 0x2f, 0xc0, 0x78, 0xa2, 0x76, + 0xd4, 0x80, 0xd1, 0x86, 0xae, 0x58, 0x17, 0xf3, 0xf4, 0xbe, 0x74, 0xf2, 0xc2, 0x6b, 0x59, 0x03, + 0x61, 0x93, 0x39, 0x9a, 0x03, 0x50, 0x96, 0x66, 0xa9, 0x5e, 0x65, 0x52, 0xbf, 0x32, 0x45, 0x87, + 0x58, 0xa3, 0x40, 0x2f, 0xc2, 0x70, 0xe4, 0xb7, 0xfc, 0x86, 0xbf, 0xbd, 0x7f, 0x8d, 0xc8, 0xd0, + 0x5e, 0xca, 0x17, 0x71, 0x23, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x49, 0xc5, 0xa4, 0x7a, 0x0c, + 0xc6, 0x06, 0xa6, 0x55, 0x58, 0x4f, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x2b, 0xf2, 0x2e, 0xf6, + 0x22, 0xf7, 0xdd, 0xd5, 0xf0, 0xce, 0x5e, 0x0d, 0xdf, 0xb0, 0x60, 0x82, 0xd6, 0xce, 0x1c, 0xad, + 0xe4, 0x31, 0xaf, 0x62, 0x72, 0x5b, 0x1d, 0x62, 0x72, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x47, + 0x42, 0x77, 0xa7, 0x6d, 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x43, 0x75, + 0x3a, 0x12, 0x04, 0x58, 0x60, 0x65, 0xc8, 0xee, 0xbe, 0xec, 0x90, 0xdd, 0x3c, 0xf2, 0xaa, 0x70, + 0xc9, 0x11, 0x02, 0x97, 0x16, 0x79, 0x55, 
0xfa, 0xea, 0xc4, 0x34, 0xf6, 0xd7, 0x8a, 0x30, 0x52, + 0xf1, 0xeb, 0xb1, 0x95, 0xf9, 0x05, 0xc3, 0xca, 0x7c, 0x21, 0x61, 0x65, 0x9e, 0xd0, 0x69, 0xdf, + 0xb5, 0x29, 0x7f, 0xab, 0x6c, 0xca, 0xbf, 0x6a, 0xb1, 0x51, 0x5b, 0x5e, 0xaf, 0x72, 0xbf, 0x3d, + 0x74, 0x19, 0x86, 0xd9, 0x06, 0xc3, 0x5e, 0x23, 0x4b, 0xd3, 0x2b, 0x4b, 0x45, 0xb5, 0x1e, 0x83, + 0xb1, 0x4e, 0x83, 0x2e, 0xc1, 0x50, 0x48, 0x9c, 0xa0, 0xb6, 0xa3, 0x76, 0x57, 0x61, 0x27, 0xe5, + 0x30, 0xac, 0xb0, 0xe8, 0xf5, 0x38, 0xe8, 0x67, 0x31, 0xff, 0x75, 0xa3, 0xde, 0x1e, 0xbe, 0x44, + 0xf2, 0x23, 0x7d, 0xda, 0xb7, 0x01, 0xa5, 0xe9, 0x7b, 0x08, 0x4b, 0x37, 0x6b, 0x86, 0xa5, 0x2b, + 0xa5, 0x42, 0xd2, 0xfd, 0x99, 0x05, 0x63, 0x15, 0xbf, 0x4e, 0x97, 0xee, 0x77, 0xd2, 0x3a, 0xd5, + 0x23, 0x1e, 0x0f, 0x74, 0x88, 0x78, 0xfc, 0x18, 0xf4, 0x57, 0xfc, 0x7a, 0xb9, 0xd2, 0x29, 0xb4, + 0x80, 0xfd, 0x57, 0x2d, 0x18, 0xac, 0xf8, 0xf5, 0x13, 0x30, 0x0b, 0x7c, 0xc8, 0x34, 0x0b, 0x3c, + 0x94, 0x33, 0x6f, 0x72, 0x2c, 0x01, 0x7f, 0xa5, 0x0f, 0x46, 0x69, 0x3b, 0xfd, 0x6d, 0x39, 0x94, + 0x46, 0xb7, 0x59, 0x3d, 0x74, 0x1b, 0x95, 0xc2, 0xfd, 0x46, 0xc3, 0xbf, 0x93, 0x1c, 0xd6, 0x55, + 0x06, 0xc5, 0x02, 0x8b, 0x9e, 0x81, 0xa1, 0x56, 0x40, 0xf6, 0x5c, 0x5f, 0x88, 0xb7, 0x9a, 0x91, + 0xa5, 0x22, 0xe0, 0x58, 0x51, 0xd0, 0x6b, 0x61, 0xe8, 0x7a, 0xf4, 0x28, 0xaf, 0xf9, 0x5e, 0x9d, + 0x6b, 0xce, 0x8b, 0x22, 0x2d, 0x87, 0x06, 0xc7, 0x06, 0x15, 0xba, 0x0d, 0x25, 0xf6, 0x9f, 0x6d, + 0x3b, 0x47, 0x4f, 0xf0, 0x2a, 0x12, 0xfe, 0x09, 0x06, 0x38, 0xe6, 0x85, 0x9e, 0x03, 0x88, 0x64, + 0x68, 0xfb, 0x50, 0x04, 0x5a, 0x53, 0x57, 0x01, 0x15, 0xf4, 0x3e, 0xc4, 0x1a, 0x15, 0x7a, 0x1a, + 0x4a, 0x91, 0xe3, 0x36, 0xae, 0xbb, 0x1e, 0x09, 0x99, 0x46, 0xbc, 0x28, 0xf3, 0xee, 0x09, 0x20, + 0x8e, 0xf1, 0x54, 0x14, 0x63, 0x41, 0x38, 0x78, 0x7a, 0xe8, 0x21, 0x46, 0xcd, 0x44, 0xb1, 0xeb, + 0x0a, 0x8a, 0x35, 0x0a, 0xb4, 0x03, 0x8f, 0xb8, 0x1e, 0x4b, 0x61, 0x41, 0xaa, 0xbb, 0x6e, 0x6b, + 0xe3, 0x7a, 0xf5, 0x16, 0x09, 0xdc, 0xad, 0xfd, 0x45, 0xa7, 0xb6, 0x4b, 0x3c, 0x99, 0xba, 0xf3, + 0x71, 0xd1, 0xc4, 0x47, 0xca, 0x1d, 0x68, 0x71, 0x47, 0x4e, 0xf6, 0xf3, 0x6c, 0xbe, 0xdf, 0xa8, + 0xa2, 0xf7, 0x1a, 0x5b, 0xc7, 0x19, 0x7d, 0xeb, 0x38, 0x3c, 0x98, 0x1d, 0xb8, 0x51, 0xd5, 0x62, + 0x48, 0xbc, 0x04, 0xa7, 0x2b, 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xaa, 0x1f, 0xdc, 0x71, 0x82, 0xba, + 0x9c, 0x5e, 0xb3, 0x32, 0x8a, 0x06, 0xdd, 0x3f, 0xfb, 0xf9, 0xee, 0x62, 0x44, 0xc8, 0x78, 0x9e, + 0x49, 0x6c, 0x47, 0x7c, 0xfb, 0x55, 0x63, 0xb2, 0x83, 0x4a, 0x02, 0x73, 0xc5, 0x89, 0x08, 0xba, + 0xc1, 0x92, 0x5b, 0xc7, 0xc7, 0xa8, 0x28, 0xfe, 0x94, 0x96, 0xdc, 0x3a, 0x46, 0x66, 0x9e, 0xbb, + 0x66, 0x79, 0xfb, 0x73, 0xa2, 0x12, 0x7e, 0x07, 0xe7, 0xfe, 0x75, 0xbd, 0x64, 0xb7, 0x95, 0x59, + 0x22, 0x0a, 0xf9, 0xe9, 0x05, 0xb8, 0xd5, 0xb3, 0x63, 0x96, 0x08, 0xfb, 0x45, 0x98, 0xa4, 0x57, + 0x3f, 0x25, 0x47, 0xb1, 0x8f, 0xec, 0x1e, 0xcd, 0xe3, 0x3f, 0xf5, 0xb3, 0x73, 0x20, 0x91, 0xfe, + 0x04, 0x7d, 0x0a, 0xc6, 0x42, 0x72, 0xdd, 0xf5, 0xda, 0x77, 0xa5, 0xe2, 0xa5, 0xc3, 0x9b, 0xc3, + 0xea, 0x8a, 0x4e, 0xc9, 0xd5, 0xb7, 0x26, 0x0c, 0x27, 0xb8, 0xa1, 0x26, 0x8c, 0xdd, 0x71, 0xbd, + 0xba, 0x7f, 0x27, 0x94, 0xfc, 0x87, 0xf2, 0xb5, 0xb8, 0xb7, 0x39, 0x65, 0xa2, 0x8d, 0x46, 0x75, + 0xb7, 0x0d, 0x66, 0x38, 0xc1, 0x9c, 0xae, 0xb5, 0xa0, 0xed, 0x2d, 0x84, 0x37, 0x43, 0x12, 0x88, + 0xe4, 0xea, 0x6c, 0xad, 0x61, 0x09, 0xc4, 0x31, 0x9e, 0xae, 0x35, 0xf6, 0xe7, 0x4a, 0xe0, 0xb7, + 0x79, 0xae, 0x0d, 0xb1, 0xd6, 0xb0, 0x82, 0x62, 0x8d, 0x82, 0xee, 0x45, 0xec, 0xdf, 0xba, 0xef, + 0x61, 0xdf, 0x8f, 0xe4, 0xee, 0xc5, 0x3c, 0x11, 0x34, 0x38, 0x36, 
0xa8, 0xd0, 0x2a, 0xa0, 0xb0, + 0xdd, 0x6a, 0x35, 0x98, 0x33, 0x93, 0xd3, 0x60, 0xac, 0xb8, 0x97, 0x47, 0x91, 0xc7, 0x0a, 0xae, + 0xa6, 0xb0, 0x38, 0xa3, 0x04, 0x3d, 0x96, 0xb6, 0x44, 0x53, 0xfb, 0x59, 0x53, 0xb9, 0xc5, 0xa7, + 0xca, 0xdb, 0x29, 0x71, 0x68, 0x05, 0x06, 0xc3, 0xfd, 0xb0, 0x16, 0x89, 0xd0, 0x8e, 0x39, 0x19, + 0xae, 0xaa, 0x8c, 0x44, 0x4b, 0xb0, 0xc8, 0x8b, 0x60, 0x59, 0x16, 0xd5, 0x60, 0x4a, 0x70, 0x5c, + 0xda, 0x71, 0x3c, 0x95, 0x2f, 0x88, 0xfb, 0x74, 0x5f, 0xbe, 0x77, 0x30, 0x3b, 0x25, 0x6a, 0xd6, + 0xd1, 0x87, 0x07, 0xb3, 0x67, 0x2a, 0x7e, 0x3d, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, + 0xf9, 0xcd, 0x56, 0x25, 0xf0, 0xb7, 0xdc, 0x06, 0xe9, 0x64, 0x35, 0xab, 0x1a, 0x94, 0x62, 0xf2, + 0x19, 0x30, 0x9c, 0xe0, 0x66, 0x7f, 0x8e, 0x89, 0x6e, 0x2c, 0x9f, 0x78, 0xd4, 0x0e, 0x08, 0x6a, + 0xc2, 0x68, 0x8b, 0x2d, 0x6e, 0x91, 0x01, 0x43, 0xcc, 0xf5, 0x17, 0x7a, 0xd4, 0xfe, 0xdc, 0xa1, + 0x27, 0x9e, 0xe9, 0x19, 0x55, 0xd1, 0xd9, 0x61, 0x93, 0xbb, 0xfd, 0x2f, 0xcf, 0xb2, 0xc3, 0xbf, + 0xca, 0x55, 0x3a, 0x83, 0xe2, 0x09, 0x89, 0xb8, 0x45, 0xce, 0xe4, 0xeb, 0x16, 0xe3, 0x61, 0x11, + 0xcf, 0x50, 0xb0, 0x2c, 0x8b, 0x3e, 0x09, 0x63, 0xf4, 0x52, 0xa6, 0x0e, 0xe0, 0x70, 0xfa, 0x54, + 0x7e, 0xa8, 0x0f, 0x45, 0xa5, 0x67, 0xc7, 0xd1, 0x0b, 0xe3, 0x04, 0x33, 0xf4, 0x3a, 0xf3, 0x44, + 0x92, 0xac, 0x0b, 0xbd, 0xb0, 0xd6, 0x9d, 0x8e, 0x24, 0x5b, 0x8d, 0x09, 0x6a, 0xc3, 0x54, 0x3a, + 0x97, 0x5e, 0x38, 0x6d, 0xe7, 0x4b, 0xb7, 0xe9, 0x74, 0x78, 0x71, 0x1a, 0x93, 0x34, 0x2e, 0xc4, + 0x59, 0xfc, 0xd1, 0x75, 0x18, 0x15, 0x49, 0xb5, 0xc5, 0xcc, 0x2d, 0x1a, 0x2a, 0xcf, 0x51, 0xac, + 0x23, 0x0f, 0x93, 0x00, 0x6c, 0x16, 0x46, 0xdb, 0x70, 0x4e, 0x4b, 0x72, 0x75, 0x25, 0x70, 0x98, + 0xdf, 0x82, 0xcb, 0xb6, 0x53, 0x4d, 0x2c, 0x79, 0xf4, 0xde, 0xc1, 0xec, 0xb9, 0x8d, 0x4e, 0x84, + 0xb8, 0x33, 0x1f, 0x74, 0x03, 0x4e, 0xf3, 0x87, 0xea, 0xcb, 0xc4, 0xa9, 0x37, 0x5c, 0x4f, 0xc9, + 0x3d, 0x7c, 0xc9, 0x9f, 0xbd, 0x77, 0x30, 0x7b, 0x7a, 0x21, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x1f, + 0x82, 0x52, 0xdd, 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0x79, 0xc4, 0x4a, 0xcb, 0xeb, 0x55, 0xf5, 0xfd, + 0xf1, 0x1f, 0x1c, 0x17, 0x40, 0xdb, 0x5c, 0x2d, 0xae, 0x94, 0x35, 0x83, 0xa9, 0x40, 0x5d, 0x49, + 0x7d, 0xa6, 0xf1, 0x54, 0x95, 0xdb, 0x83, 0xd4, 0x0b, 0x0e, 0xe3, 0x15, 0xab, 0xc1, 0x18, 0xbd, + 0x06, 0x48, 0xc4, 0xab, 0x5f, 0xa8, 0xb1, 0xf4, 0x2a, 0xcc, 0x8a, 0x30, 0x64, 0x3e, 0x9e, 0xac, + 0xa6, 0x28, 0x70, 0x46, 0x29, 0x74, 0x95, 0xee, 0x2a, 0x3a, 0x54, 0xec, 0x5a, 0x2a, 0xeb, 0xe3, + 0x32, 0x69, 0x05, 0x84, 0xf9, 0x61, 0x99, 0x1c, 0x71, 0xa2, 0x1c, 0xaa, 0xc3, 0x23, 0x4e, 0x3b, + 0xf2, 0x99, 0xc5, 0xc1, 0x24, 0xdd, 0xf0, 0x77, 0x89, 0xc7, 0x8c, 0x7d, 0x43, 0x8b, 0x17, 0xa8, + 0x60, 0xb5, 0xd0, 0x81, 0x0e, 0x77, 0xe4, 0x42, 0x05, 0x62, 0x95, 0xe6, 0x19, 0xcc, 0xf0, 0x63, + 0x19, 0xa9, 0x9e, 0x5f, 0x84, 0xe1, 0x1d, 0x3f, 0x8c, 0xd6, 0x49, 0x74, 0xc7, 0x0f, 0x76, 0x45, + 0x18, 0xdd, 0x38, 0x28, 0x79, 0x8c, 0xc2, 0x3a, 0x1d, 0xbd, 0xf1, 0x32, 0x57, 0x94, 0xf2, 0x32, + 0xf3, 0x02, 0x18, 0x8a, 0xf7, 0x98, 0xab, 0x1c, 0x8c, 0x25, 0x5e, 0x92, 0x96, 0x2b, 0x4b, 0xcc, + 0xa2, 0x9f, 0x20, 0x2d, 0x57, 0x96, 0xb0, 0xc4, 0xd3, 0xe9, 0x1a, 0xee, 0x38, 0x01, 0xa9, 0x04, + 0x7e, 0x8d, 0x84, 0x5a, 0x28, 0xfc, 0x87, 0x79, 0x90, 0x60, 0x3a, 0x5d, 0xab, 0x59, 0x04, 0x38, + 0xbb, 0x1c, 0x22, 0xe9, 0x04, 0x6f, 0x63, 0xf9, 0xa6, 0x98, 0xb4, 0x3c, 0xd3, 0x63, 0x8e, 0x37, + 0x0f, 0x26, 0x54, 0x6a, 0x39, 0x1e, 0x16, 0x38, 0x9c, 0x1e, 0x67, 0x73, 0xbb, 0xf7, 0x98, 0xc2, + 0xca, 0xb8, 0x55, 0x4e, 0x70, 0xc2, 0x29, 0xde, 0x46, 0x84, 0xb9, 0x89, 0xae, 0x11, 0xe6, 
0xe6, + 0xa1, 0x14, 0xb6, 0x37, 0xeb, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, + 0xc0, 0x31, 0x0d, 0x5a, 0x85, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, + 0x61, 0x36, 0xa4, 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0xb2, 0x9a, 0x4e, + 0x99, 0xaf, 0xe1, 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x26, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, + 0xa2, 0x62, 0xde, 0x99, 0xfc, 0xe8, 0x78, 0x1b, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, + 0x83, 0x36, 0xf8, 0x7c, 0x67, 0x81, 0xef, 0x49, 0x38, 0xfd, 0x50, 0xfe, 0x99, 0xa4, 0xe2, 0xe3, + 0x9b, 0xcb, 0x41, 0x94, 0xc4, 0x3a, 0x1b, 0x74, 0x05, 0x26, 0x5b, 0x81, 0xeb, 0xb3, 0x39, 0xa1, + 0x8c, 0x96, 0xd3, 0x66, 0x9a, 0xab, 0x4a, 0x92, 0x00, 0xa7, 0xcb, 0xb0, 0x77, 0xf2, 0x02, 0x38, + 0x7d, 0x96, 0xa7, 0xea, 0xe0, 0x37, 0x59, 0x0e, 0xc3, 0x0a, 0x8b, 0xd6, 0xd8, 0x4e, 0xcc, 0x95, + 0x30, 0xd3, 0x33, 0xf9, 0x61, 0x8c, 0x74, 0x65, 0x0d, 0x17, 0x5e, 0xd5, 0x5f, 0x1c, 0x73, 0x40, + 0x75, 0x2d, 0x43, 0x26, 0xbd, 0x02, 0x84, 0xd3, 0x8f, 0x74, 0xf0, 0x07, 0x4c, 0x5c, 0x8a, 0x62, + 0x81, 0xc0, 0x00, 0x87, 0x38, 0xc1, 0x13, 0x7d, 0x04, 0x26, 0x44, 0xf0, 0xc5, 0xb8, 0x9b, 0xce, + 0xc5, 0x8e, 0xf2, 0x38, 0x81, 0xc3, 0x29, 0x6a, 0x9e, 0x2a, 0xc3, 0xd9, 0x6c, 0x10, 0xb1, 0xf5, + 0x5d, 0x77, 0xbd, 0xdd, 0x70, 0xfa, 0x3c, 0xdb, 0x1f, 0x44, 0xaa, 0x8c, 0x24, 0x16, 0x67, 0x94, + 0x40, 0x1b, 0x30, 0xd1, 0x0a, 0x08, 0x69, 0x32, 0x41, 0x5f, 0x9c, 0x67, 0xb3, 0x3c, 0x4c, 0x04, + 0x6d, 0x49, 0x25, 0x81, 0x3b, 0xcc, 0x80, 0xe1, 0x14, 0x07, 0x74, 0x07, 0x86, 0xfc, 0x3d, 0x12, + 0xec, 0x10, 0xa7, 0x3e, 0x7d, 0xa1, 0xc3, 0xc3, 0x0d, 0x71, 0xb8, 0xdd, 0x10, 0xb4, 0x09, 0x47, + 0x07, 0x09, 0xee, 0xee, 0xe8, 0x20, 0x2b, 0x43, 0xff, 0xaf, 0x05, 0x67, 0xa5, 0x6d, 0xa4, 0xda, + 0xa2, 0xbd, 0xbe, 0xe4, 0x7b, 0x61, 0x14, 0xf0, 0xc0, 0x06, 0x8f, 0xe6, 0x3f, 0xf6, 0xdf, 0xc8, + 0x29, 0xa4, 0xf4, 0xc0, 0x67, 0xf3, 0x28, 0x42, 0x9c, 0x5f, 0x23, 0x5a, 0x82, 0xc9, 0x90, 0x44, + 0x72, 0x33, 0x5a, 0x08, 0x57, 0x5f, 0x5f, 0x5e, 0x9f, 0x7e, 0x8c, 0x47, 0x65, 0xa0, 0x8b, 0xa1, + 0x9a, 0x44, 0xe2, 0x34, 0x3d, 0xba, 0x0c, 0x05, 0x3f, 0x9c, 0x7e, 0xbc, 0x43, 0x52, 0x55, 0xbf, + 0x7e, 0xa3, 0xca, 0x1d, 0xde, 0x6e, 0x54, 0x71, 0xc1, 0x0f, 0x65, 0xba, 0x0a, 0x7a, 0x1f, 0x0b, + 0xa7, 0x9f, 0xe0, 0x5a, 0x43, 0x99, 0xae, 0x82, 0x01, 0x71, 0x8c, 0x47, 0x3b, 0x30, 0x1e, 0x1a, + 0xf7, 0xde, 0x70, 0xfa, 0x22, 0xeb, 0xa9, 0x27, 0xf2, 0x06, 0xcd, 0xa0, 0xd6, 0xa2, 0xcd, 0x9b, + 0x5c, 0x70, 0x92, 0x2d, 0x5f, 0x5d, 0xda, 0x05, 0x3f, 0x9c, 0x7e, 0xb2, 0xcb, 0xea, 0xd2, 0x88, + 0xf5, 0xd5, 0xa5, 0xf3, 0xc0, 0x09, 0x9e, 0x33, 0xdf, 0x05, 0x93, 0x29, 0x71, 0xe9, 0x28, 0x99, + 0x98, 0x66, 0x76, 0x61, 0xd4, 0x98, 0x92, 0x0f, 0xd4, 0xb1, 0xe0, 0x9f, 0x0e, 0x42, 0x49, 0x19, + 0x9d, 0xd1, 0xbc, 0xe9, 0x4b, 0x70, 0x36, 0xe9, 0x4b, 0x30, 0x54, 0xf1, 0xeb, 0x86, 0xfb, 0xc0, + 0x46, 0x46, 0xec, 0xbe, 0xbc, 0x0d, 0xb0, 0xf7, 0x37, 0x0d, 0x9a, 0x26, 0xbf, 0xd8, 0xb3, 0x53, + 0x42, 0x5f, 0x47, 0xe3, 0xc0, 0x15, 0x98, 0xf4, 0x7c, 0x26, 0xa3, 0x93, 0xba, 0x14, 0xc0, 0x98, + 0x9c, 0x55, 0xd2, 0x83, 0xe1, 0x24, 0x08, 0x70, 0xba, 0x0c, 0xad, 0x90, 0x0b, 0x4a, 0x49, 0x6b, + 0x04, 0x97, 0xa3, 0xb0, 0xc0, 0xa2, 0xc7, 0xa0, 0xbf, 0xe5, 0xd7, 0xcb, 0x15, 0x21, 0x9f, 0x6b, + 0x11, 0x63, 0xeb, 0xe5, 0x0a, 0xe6, 0x38, 0xb4, 0x00, 0x03, 0xec, 0x47, 0x38, 0x3d, 0x92, 0x1f, + 0xf5, 0x84, 0x95, 0xd0, 0xf2, 0x5c, 0xb1, 0x02, 0x58, 0x14, 0x64, 0x5a, 0x51, 0x7a, 0xa9, 0x61, + 0x5a, 0xd1, 0xc1, 0xfb, 0xd4, 0x8a, 0x4a, 0x06, 0x38, 0xe6, 0x85, 0xee, 0xc2, 0x69, 0xe3, 0x22, + 0xc9, 0xa7, 0x08, 
0x09, 0x45, 0xe4, 0x85, 0xc7, 0x3a, 0xde, 0x20, 0x85, 0x13, 0xc3, 0x39, 0xd1, + 0xe8, 0xd3, 0xe5, 0x2c, 0x4e, 0x38, 0xbb, 0x02, 0xd4, 0x80, 0xc9, 0x5a, 0xaa, 0xd6, 0xa1, 0xde, + 0x6b, 0x55, 0x03, 0x9a, 0xae, 0x31, 0xcd, 0x18, 0xbd, 0x02, 0x43, 0x6f, 0xfa, 0x21, 0x3b, 0xdb, + 0xc4, 0x9d, 0x42, 0x3e, 0xdb, 0x1f, 0x7a, 0xfd, 0x46, 0x95, 0xc1, 0x0f, 0x0f, 0x66, 0x87, 0x2b, + 0x7e, 0x5d, 0xfe, 0xc5, 0xaa, 0x00, 0xfa, 0x01, 0x0b, 0x66, 0xd2, 0x37, 0x55, 0xd5, 0xe8, 0xd1, + 0xde, 0x1b, 0x6d, 0x8b, 0x4a, 0x67, 0x56, 0x72, 0xd9, 0xe1, 0x0e, 0x55, 0xd9, 0xbf, 0x6c, 0x31, + 0xdd, 0xaa, 0x30, 0x0e, 0x92, 0xb0, 0xdd, 0x38, 0x89, 0xf4, 0xbe, 0x2b, 0x86, 0xdd, 0xf2, 0xbe, + 0x9d, 0x5a, 0xfe, 0xa1, 0xc5, 0x9c, 0x5a, 0x4e, 0xf0, 0xf5, 0xca, 0xeb, 0x30, 0x14, 0xc9, 0xb4, + 0xcb, 0x1d, 0x32, 0x12, 0x6b, 0x8d, 0x62, 0x8e, 0x3d, 0x4a, 0xc2, 0x57, 0x19, 0x96, 0x15, 0x1b, + 0xfb, 0xef, 0xf2, 0x11, 0x90, 0x98, 0x13, 0x30, 0x0f, 0x2d, 0x9b, 0xe6, 0xa1, 0xd9, 0x2e, 0x5f, + 0x90, 0x63, 0x26, 0xfa, 0x3b, 0x66, 0xbb, 0x99, 0x66, 0xeb, 0x9d, 0xee, 0x4d, 0x65, 0x7f, 0xc1, + 0x02, 0x88, 0x03, 0x72, 0xf7, 0x90, 0x58, 0xef, 0x25, 0x2a, 0xd3, 0xfb, 0x91, 0x5f, 0xf3, 0x1b, + 0xc2, 0xf8, 0xf9, 0x48, 0x6c, 0xa1, 0xe2, 0xf0, 0x43, 0xed, 0x37, 0x56, 0xd4, 0x68, 0x56, 0x86, + 0xff, 0x2b, 0xc6, 0x36, 0x53, 0x23, 0xf4, 0xdf, 0x97, 0x2c, 0x38, 0x95, 0xe5, 0x0a, 0x4d, 0x6f, + 0x88, 0x5c, 0xc7, 0xa7, 0x3c, 0xdd, 0xd4, 0x68, 0xde, 0x12, 0x70, 0xac, 0x28, 0x7a, 0xce, 0x58, + 0x78, 0xb4, 0x48, 0xd8, 0x37, 0x60, 0xb4, 0x12, 0x10, 0xed, 0x70, 0x7d, 0x95, 0x87, 0x94, 0xe0, + 0xed, 0x79, 0xe6, 0xc8, 0xe1, 0x24, 0xec, 0xaf, 0x14, 0xe0, 0x14, 0x77, 0x18, 0x59, 0xd8, 0xf3, + 0xdd, 0x7a, 0xc5, 0xaf, 0x8b, 0x07, 0x6f, 0x1f, 0x87, 0x91, 0x96, 0xa6, 0x98, 0xed, 0x14, 0xd5, + 0x55, 0x57, 0xe0, 0xc6, 0xaa, 0x24, 0x1d, 0x8a, 0x0d, 0x5e, 0xa8, 0x0e, 0x23, 0x64, 0xcf, 0xad, + 0x29, 0xaf, 0x83, 0xc2, 0x91, 0x0f, 0x3a, 0x55, 0xcb, 0x8a, 0xc6, 0x07, 0x1b, 0x5c, 0x1f, 0x40, + 0x1e, 0x71, 0xfb, 0xc7, 0x2c, 0x78, 0x28, 0x27, 0x06, 0x2c, 0xad, 0xee, 0x0e, 0x73, 0xcd, 0x11, + 0xd3, 0x56, 0x55, 0xc7, 0x1d, 0x76, 0xb0, 0xc0, 0xa2, 0x8f, 0x02, 0x70, 0x87, 0x1b, 0xe2, 0xd5, + 0xba, 0x06, 0xcb, 0x34, 0xe2, 0xfc, 0x69, 0x21, 0xdb, 0x64, 0x79, 0xac, 0xf1, 0xb2, 0xbf, 0xd4, + 0x07, 0xfd, 0xcc, 0xc1, 0x03, 0x55, 0x60, 0x70, 0x87, 0x67, 0xf5, 0xe9, 0x38, 0x6e, 0x94, 0x56, + 0x26, 0x0a, 0x8a, 0xc7, 0x4d, 0x83, 0x62, 0xc9, 0x06, 0xad, 0xc1, 0x14, 0x4f, 0xae, 0xd4, 0x58, + 0x26, 0x0d, 0x67, 0x5f, 0xea, 0x3c, 0x79, 0x26, 0x60, 0xa5, 0xfb, 0x2d, 0xa7, 0x49, 0x70, 0x56, + 0x39, 0xf4, 0x2a, 0x8c, 0xd1, 0x3b, 0xa8, 0xdf, 0x8e, 0x24, 0x27, 0x9e, 0x56, 0x49, 0x89, 0xe5, + 0x1b, 0x06, 0x16, 0x27, 0xa8, 0xd1, 0x2b, 0x30, 0xda, 0x4a, 0x69, 0x77, 0xfb, 0x63, 0x35, 0x88, + 0xa9, 0xd1, 0x35, 0x69, 0x99, 0x37, 0x74, 0x9b, 0xf9, 0x7e, 0x6f, 0xec, 0x04, 0x24, 0xdc, 0xf1, + 0x1b, 0x75, 0x26, 0xfe, 0xf5, 0x6b, 0xde, 0xd0, 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0xb6, 0x1c, + 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, 0xb2, 0x9a, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x6a, + 0xeb, 0xc1, 0xe3, 0x51, 0x5b, 0xdb, 0x3f, 0x53, 0x00, 0x63, 0x68, 0xbf, 0x73, 0xd3, 0x3d, 0xd1, + 0x2f, 0xdb, 0x0e, 0x5a, 0x35, 0xe1, 0xcc, 0x94, 0xf9, 0x65, 0x71, 0x16, 0x57, 0xfe, 0x65, 0xf4, + 0x3f, 0x66, 0xa5, 0xe8, 0x1a, 0x3f, 0x5d, 0x09, 0x7c, 0x7a, 0xc8, 0xc9, 0xa0, 0x63, 0xea, 0xd1, + 0xc1, 0xa0, 0x7c, 0x90, 0xdd, 0x21, 0x3c, 0xa7, 0x70, 0xcb, 0xe6, 0x1c, 0x0c, 0xbf, 0x9f, 0xaa, + 0x88, 0x8c, 0x20, 0xb9, 0xa0, 0xcb, 0x30, 0x2c, 0x72, 0xf8, 0x30, 0xdf, 0x78, 0xbe, 0x98, 0x98, + 0x9f, 0xd2, 0x72, 0x0c, 0xc6, 0x3a, 0x8d, 
0xfd, 0x83, 0x05, 0x98, 0xca, 0x78, 0xdc, 0xc4, 0x8f, + 0x91, 0x6d, 0x37, 0x8c, 0x54, 0xa2, 0x58, 0xed, 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, + 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x1e, 0x0f, 0x08, 0xec, 0x11, 0x53, 0xae, 0x5e, 0x80, 0xbe, 0x76, + 0x48, 0x64, 0x60, 0x5d, 0x75, 0x6c, 0x33, 0x9b, 0x2e, 0xc3, 0xd0, 0x6b, 0xd4, 0xb6, 0x32, 0x8f, + 0x6a, 0xd7, 0x28, 0x6e, 0x20, 0xe5, 0x38, 0xda, 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0xcb, 0x56, + 0x1c, 0x21, 0x92, 0x41, 0xb1, 0xc0, 0xda, 0x5f, 0x2c, 0xc2, 0xd9, 0xdc, 0xe7, 0x8e, 0xb4, 0xe9, + 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0x39, 0x80, 0xf1, 0xa8, 0x90, 0xa4, 0xb5, 0xb3, 0x26, 0xe0, 0x58, + 0x51, 0xa0, 0x8b, 0xd0, 0xcf, 0x34, 0xc2, 0xa9, 0x94, 0xb9, 0x8b, 0xcb, 0x3c, 0x4c, 0x18, 0x47, + 0xf7, 0x9c, 0xe5, 0xfc, 0x31, 0x2a, 0xc1, 0xf8, 0x8d, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x0d, + 0xcc, 0x90, 0xe8, 0x09, 0xd1, 0x5f, 0x09, 0x8f, 0x27, 0xec, 0xd4, 0xfd, 0x50, 0xeb, 0xb4, 0xa7, + 0x60, 0x70, 0x97, 0xec, 0x07, 0xae, 0xb7, 0x9d, 0xf4, 0x84, 0xbb, 0xc6, 0xc1, 0x58, 0xe2, 0xcd, + 0x1c, 0x8f, 0x83, 0xc7, 0x9d, 0x9e, 0x7c, 0xa8, 0xab, 0x78, 0xf2, 0xc3, 0x45, 0x18, 0xc7, 0x8b, + 0xcb, 0xef, 0x0e, 0xc4, 0xcd, 0xf4, 0x40, 0x1c, 0x77, 0x7a, 0xf2, 0xee, 0xa3, 0xf1, 0x0b, 0x16, + 0x8c, 0xb3, 0x4c, 0x42, 0x22, 0xa8, 0x81, 0xeb, 0x7b, 0x27, 0x70, 0x15, 0x78, 0x0c, 0xfa, 0x03, + 0x5a, 0x69, 0x32, 0x57, 0x2e, 0x6b, 0x09, 0xe6, 0x38, 0xf4, 0x08, 0xf4, 0xb1, 0x26, 0xd0, 0xc1, + 0x1b, 0xe1, 0x5b, 0xf0, 0xb2, 0x13, 0x39, 0x98, 0x41, 0x59, 0x90, 0x2c, 0x4c, 0x5a, 0x0d, 0x97, + 0x37, 0x3a, 0xb6, 0xd7, 0xbf, 0x33, 0x02, 0x21, 0x64, 0x36, 0xed, 0xed, 0x05, 0xc9, 0xca, 0x66, + 0xd9, 0xf9, 0x9a, 0xfd, 0xc7, 0x05, 0x38, 0x9f, 0x59, 0xae, 0xe7, 0x20, 0x59, 0x9d, 0x4b, 0x3f, + 0xc8, 0x5c, 0x31, 0xc5, 0x13, 0xf4, 0x33, 0xee, 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0x76, 0x55, + 0x66, 0x97, 0xbd, 0x43, 0x62, 0x57, 0x65, 0xb6, 0x2d, 0x47, 0x4d, 0xf0, 0xe7, 0x85, 0x9c, 0x6f, + 0x61, 0x0a, 0x83, 0x4b, 0x74, 0x9f, 0x61, 0xc8, 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, + 0x61, 0xd1, 0x02, 0x8c, 0x37, 0x5d, 0x8f, 0x6e, 0x3e, 0xfb, 0xa6, 0x28, 0xae, 0x14, 0xf9, 0x6b, + 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, 0xc5, 0xb5, 0xe2, 0x5f, 0xf7, 0xca, 0x91, 0x56, 0xdd, 0x9c, + 0xe9, 0xcb, 0xa0, 0x7a, 0x31, 0x23, 0xc6, 0xd5, 0x9a, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, + 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, 0xa3, 0xf7, 0x6d, 0x18, 0xb0, 0xbf, 0x51, 0x84, 0x87, 0x3b, + 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, 0xa0, 0xed, 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0xa9, 0xad, 0x76, + 0xa3, 0xb1, 0xcf, 0x9e, 0xdf, 0x90, 0xba, 0xa4, 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x9c, 0x5a, 0xcd, + 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, 0x8b, 0x9e, 0x24, 0xfb, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, + 0x24, 0x36, 0x69, 0xd1, 0x15, 0x98, 0x74, 0xf6, 0x1c, 0x97, 0x07, 0x07, 0x97, 0x0c, 0xf8, 0x1d, + 0x4b, 0xe9, 0x73, 0x17, 0x92, 0x04, 0x38, 0x5d, 0x06, 0xbd, 0x06, 0xc8, 0xdf, 0x64, 0x4e, 0xfa, + 0xf5, 0x2b, 0xc4, 0x13, 0x26, 0x67, 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, + 0xa5, 0x12, 0x41, 0x9c, 0x06, 0xf2, 0x83, 0x38, 0x75, 0xde, 0x17, 0xbb, 0xa6, 0x29, 0xba, 0x0c, + 0xa3, 0x47, 0x74, 0x3d, 0xb5, 0xff, 0xad, 0x45, 0x4f, 0x3c, 0x5e, 0xc6, 0x8c, 0x90, 0xfa, 0x0a, + 0xf3, 0x8d, 0xe5, 0xea, 0x61, 0x2d, 0x4a, 0xce, 0x69, 0xcd, 0x37, 0x36, 0x46, 0x62, 0x93, 0x96, + 0xcf, 0x21, 0xcd, 0xa7, 0xd5, 0xb8, 0x15, 0x88, 0x30, 0x6e, 0x8a, 0x02, 0x7d, 0x0c, 0x06, 0xeb, + 0xee, 0x9e, 0x1b, 0x0a, 0xe5, 0xd8, 0x91, 0x2d, 0x51, 0xf1, 0xd6, 0xb9, 0xcc, 0xd9, 0x60, 0xc9, + 0xcf, 0xfe, 0xe1, 0x42, 0xdc, 0x27, 0xaf, 0xb7, 0xfd, 0xc8, 0x39, 
0x81, 0x93, 0xfc, 0x8a, 0x71, + 0x92, 0x3f, 0xd1, 0x29, 0x96, 0x1d, 0x6b, 0x52, 0xee, 0x09, 0x7e, 0x23, 0x71, 0x82, 0x3f, 0xd9, + 0x9d, 0x55, 0xe7, 0x93, 0xfb, 0xef, 0x59, 0x30, 0x69, 0xd0, 0x9f, 0xc0, 0x01, 0xb2, 0x6a, 0x1e, + 0x20, 0x8f, 0x76, 0xfd, 0x86, 0x9c, 0x83, 0xe3, 0xfb, 0x8a, 0x89, 0xb6, 0xb3, 0x03, 0xe3, 0x4d, + 0xe8, 0xdb, 0x71, 0x82, 0x7a, 0xa7, 0xdc, 0x1d, 0xa9, 0x42, 0x73, 0x57, 0x9d, 0x40, 0x98, 0xe9, + 0x9f, 0x91, 0xbd, 0x4e, 0x41, 0x5d, 0x4d, 0xf4, 0xac, 0x2a, 0xf4, 0x12, 0x0c, 0x84, 0x35, 0xbf, + 0xa5, 0xde, 0xeb, 0x5c, 0x60, 0x1d, 0xcd, 0x20, 0x87, 0x07, 0xb3, 0xc8, 0xac, 0x8e, 0x82, 0xb1, + 0xa0, 0x47, 0x1f, 0x87, 0x51, 0xf6, 0x4b, 0xf9, 0xcc, 0x15, 0xf3, 0x35, 0x18, 0x55, 0x9d, 0x90, + 0x3b, 0x94, 0x1a, 0x20, 0x6c, 0xb2, 0x9a, 0xd9, 0x86, 0x92, 0xfa, 0xac, 0x07, 0x6a, 0xea, 0xfd, + 0xd7, 0x45, 0x98, 0xca, 0x98, 0x73, 0x28, 0x34, 0x46, 0xe2, 0x72, 0x8f, 0x53, 0xf5, 0x6d, 0x8e, + 0x45, 0xc8, 0x2e, 0x50, 0x75, 0x31, 0xb7, 0x7a, 0xae, 0xf4, 0x66, 0x48, 0x92, 0x95, 0x52, 0x50, + 0xf7, 0x4a, 0x69, 0x65, 0x27, 0xd6, 0xd5, 0xb4, 0x22, 0xd5, 0xd2, 0x07, 0x3a, 0xa6, 0xbf, 0xda, + 0x07, 0xa7, 0xb2, 0xc2, 0x6b, 0xa2, 0xcf, 0x26, 0x32, 0xc7, 0xbe, 0xd0, 0x6b, 0x60, 0x4e, 0x9e, + 0x4e, 0x56, 0x84, 0xfd, 0x9b, 0x33, 0x73, 0xc9, 0x76, 0xed, 0x66, 0x51, 0x27, 0x0b, 0x3c, 0x12, + 0xf0, 0x8c, 0xbf, 0x72, 0xfb, 0x78, 0x7f, 0xcf, 0x0d, 0x10, 0xa9, 0x82, 0xc3, 0x84, 0x3f, 0x8e, + 0x04, 0x77, 0xf7, 0xc7, 0x91, 0x35, 0xa3, 0x32, 0x0c, 0xd4, 0xb8, 0xa3, 0x47, 0xb1, 0xfb, 0x16, + 0xc6, 0xbd, 0x3c, 0xd4, 0x06, 0x2c, 0xbc, 0x3b, 0x04, 0x83, 0x19, 0x17, 0x86, 0xb5, 0x8e, 0x79, + 0xa0, 0x93, 0x67, 0x97, 0x1e, 0x7c, 0x5a, 0x17, 0x3c, 0xd0, 0x09, 0xf4, 0x63, 0x16, 0x24, 0x5e, + 0x7b, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94, 0xbb, 0x00, 0x7d, 0x81, 0xdf, 0x20, 0xc9, 0x6c, 0xad, + 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22, 0x8a, 0x55, 0x2d, 0x23, 0xfa, 0x35, 0x52, 0x5c, 0x10, + 0x1f, 0x83, 0xfe, 0x06, 0xd9, 0x23, 0x8d, 0x64, 0x52, 0xad, 0xeb, 0x14, 0x88, 0x39, 0xce, 0xfe, + 0x85, 0x3e, 0x38, 0xd7, 0x31, 0x0a, 0x10, 0xbd, 0x8c, 0x6d, 0x3b, 0x11, 0xb9, 0xe3, 0xec, 0x27, + 0xb3, 0xdf, 0x5c, 0xe1, 0x60, 0x2c, 0xf1, 0xec, 0xe9, 0x21, 0x0f, 0x62, 0x9f, 0x50, 0x61, 0x8a, + 0xd8, 0xf5, 0x02, 0x6b, 0xaa, 0xc4, 0x8a, 0xc7, 0xa1, 0x12, 0x7b, 0x0e, 0x20, 0x0c, 0x1b, 0xdc, + 0x27, 0xae, 0x2e, 0xde, 0x34, 0xc6, 0xc9, 0x0e, 0xaa, 0xd7, 0x05, 0x06, 0x6b, 0x54, 0x68, 0x19, + 0x26, 0x5a, 0x81, 0x1f, 0x71, 0x8d, 0xf0, 0x32, 0x77, 0x1b, 0xed, 0x37, 0x03, 0xb0, 0x54, 0x12, + 0x78, 0x9c, 0x2a, 0x81, 0x5e, 0x84, 0x61, 0x11, 0x94, 0xa5, 0xe2, 0xfb, 0x0d, 0xa1, 0x84, 0x52, + 0x9e, 0x94, 0xd5, 0x18, 0x85, 0x75, 0x3a, 0xad, 0x18, 0x53, 0x33, 0x0f, 0x66, 0x16, 0xe3, 0xaa, + 0x66, 0x8d, 0x2e, 0x11, 0xb5, 0x77, 0xa8, 0xa7, 0xa8, 0xbd, 0xb1, 0x5a, 0xae, 0xd4, 0xb3, 0xd5, + 0x13, 0xba, 0x2a, 0xb2, 0xbe, 0xda, 0x07, 0x53, 0x62, 0xe2, 0x3c, 0xe8, 0xe9, 0x72, 0x33, 0x3d, + 0x5d, 0x8e, 0x43, 0x71, 0xf7, 0xee, 0x9c, 0x39, 0xe9, 0x39, 0xf3, 0x23, 0x16, 0x98, 0x92, 0x1a, + 0xfa, 0xbf, 0x72, 0xd3, 0x87, 0xbd, 0x98, 0x2b, 0xf9, 0x29, 0xaf, 0xc1, 0xb7, 0x99, 0x48, 0xcc, + 0xfe, 0x37, 0x16, 0x3c, 0xda, 0x95, 0x23, 0x5a, 0x81, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, + 0xca, 0xad, 0x5c, 0x22, 0x72, 0xa4, 0xdb, 0xb8, 0x24, 0x5a, 0x49, 0xe5, 0x69, 0x7b, 0x2a, 0x23, + 0x4f, 0xdb, 0x69, 0xa3, 0x7b, 0xee, 0x33, 0x51, 0xdb, 0x0f, 0xd1, 0x13, 0xc7, 0x78, 0xd2, 0x85, + 0xde, 0x6f, 0x28, 0x1d, 0xed, 0x84, 0xd2, 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x11, 0x98, 0x60, + 0xd1, 0xda, 0xd8, 0x23, 0x07, 0xf1, 0xd8, 0xac, 0x10, 0x3b, 0x32, 0x5f, 0x4f, 0xe0, 0x70, 
0x8a, + 0xda, 0xfe, 0xa3, 0x22, 0x0c, 0xf0, 0xe5, 0x77, 0x02, 0xd7, 0xcb, 0xa7, 0xa1, 0xe4, 0x36, 0x9b, + 0x6d, 0x9e, 0x7a, 0xab, 0x3f, 0x76, 0x8b, 0x2d, 0x4b, 0x20, 0x8e, 0xf1, 0x68, 0x55, 0xe8, 0xbb, + 0x3b, 0x04, 0x84, 0xe5, 0x0d, 0x9f, 0x5b, 0x76, 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, + 0xe3, 0xe8, 0x53, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x36, 0x85, 0x89, 0x38, 0xd4, 0xef, 0xed, 0xc0, + 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0x73, 0xc6, 0x49, 0x3f, + 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b, 0xf9, 0x00, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, + 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda, 0x76, 0x24, 0xe5, 0xd9, 0x2f, 0x5a, 0x30, 0xce, + 0x1b, 0xb3, 0xe2, 0xed, 0x89, 0xd3, 0xe0, 0x2d, 0x38, 0xd5, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0xf7, + 0xbe, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16, 0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, + 0xa7, 0x21, 0xde, 0xd6, 0x8f, 0xf0, 0xd5, 0xc6, 0x61, 0x58, 0x61, 0xed, 0xdf, 0xb5, 0x60, 0x92, + 0xb7, 0xfc, 0x1a, 0xd9, 0x57, 0x7b, 0xd3, 0xb7, 0xb2, 0xed, 0x22, 0xe9, 0x63, 0x21, 0x27, 0xe9, + 0xa3, 0xfe, 0x69, 0xc5, 0x8e, 0x9f, 0xf6, 0x15, 0x0b, 0xc4, 0x0c, 0x39, 0x01, 0x7d, 0xc6, 0x77, + 0x99, 0xfa, 0x8c, 0x99, 0xfc, 0x45, 0x90, 0xa3, 0xc8, 0xf8, 0x33, 0x0b, 0x26, 0x38, 0x41, 0x6c, + 0xab, 0xff, 0x96, 0x8e, 0x43, 0x2f, 0xa9, 0xe1, 0xaf, 0x91, 0xfd, 0x0d, 0xbf, 0xe2, 0x44, 0x3b, + 0xd9, 0x1f, 0x65, 0x0c, 0x56, 0x5f, 0xc7, 0xc1, 0xaa, 0xcb, 0x05, 0x64, 0xe4, 0x44, 0xea, 0xf2, + 0x42, 0xfe, 0xa8, 0x39, 0x91, 0xec, 0x6f, 0x5a, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, + 0x06, 0xd5, 0x0e, 0xba, 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x4b, 0xf7, 0x24, 0x1c, 0x2e, + 0x8a, 0xdd, 0x1d, 0x2e, 0x8e, 0xd0, 0xa3, 0xff, 0x6c, 0x00, 0x92, 0xcf, 0xda, 0xd0, 0x2d, 0x18, + 0xa9, 0x39, 0x2d, 0x67, 0xd3, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0xde, 0x58, 0x4b, 0x1a, 0x9d, + 0x30, 0x91, 0x6b, 0x10, 0x6c, 0xf0, 0x41, 0x73, 0x00, 0xad, 0xc0, 0xdd, 0x73, 0x1b, 0x64, 0x9b, + 0xa9, 0x5d, 0x58, 0x34, 0x0f, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x88, 0x21, 0x50, 0x7c, + 0xc0, 0x31, 0x04, 0xe0, 0xc4, 0x62, 0x08, 0xf4, 0x1d, 0x29, 0x86, 0xc0, 0xd0, 0x91, 0x63, 0x08, + 0xf4, 0xf7, 0x14, 0x43, 0x00, 0xc3, 0x19, 0x29, 0x7b, 0xd2, 0xff, 0xab, 0x6e, 0x83, 0x88, 0x0b, + 0x07, 0x0f, 0x41, 0x32, 0x73, 0xef, 0x60, 0xf6, 0x0c, 0xce, 0xa4, 0xc0, 0x39, 0x25, 0xd1, 0x47, + 0x61, 0xda, 0x69, 0x34, 0xfc, 0x3b, 0x6a, 0x50, 0x57, 0xc2, 0x9a, 0xd3, 0xe0, 0x26, 0x90, 0x41, + 0xc6, 0xf5, 0x91, 0x7b, 0x07, 0xb3, 0xd3, 0x0b, 0x39, 0x34, 0x38, 0xb7, 0x34, 0xfa, 0x10, 0x94, + 0x5a, 0x81, 0x5f, 0x5b, 0xd3, 0xde, 0xde, 0x9e, 0xa7, 0x1d, 0x58, 0x91, 0xc0, 0xc3, 0x83, 0xd9, + 0x51, 0xf5, 0x87, 0x1d, 0xf8, 0x71, 0x81, 0x8c, 0xa0, 0x00, 0xc3, 0xc7, 0x1a, 0x14, 0x60, 0x17, + 0xa6, 0xaa, 0x24, 0x70, 0x9d, 0x86, 0xfb, 0x16, 0x95, 0x97, 0xe5, 0xfe, 0xb4, 0x01, 0xa5, 0x20, + 0xb1, 0x23, 0xf7, 0x14, 0xa4, 0x55, 0x4b, 0x4e, 0x23, 0x77, 0xe0, 0x98, 0x91, 0xfd, 0x3f, 0x2c, + 0x18, 0x14, 0xcf, 0xd8, 0x4e, 0x40, 0x6a, 0x5c, 0x30, 0x8c, 0x12, 0xb3, 0xd9, 0x1d, 0xc6, 0x1a, + 0x93, 0x6b, 0x8e, 0x28, 0x27, 0xcc, 0x11, 0x8f, 0x76, 0x62, 0xd2, 0xd9, 0x10, 0xf1, 0x97, 0x8b, + 0x54, 0x7a, 0x37, 0x1e, 0x54, 0x3f, 0xf8, 0x2e, 0x58, 0x87, 0xc1, 0x50, 0x3c, 0xe8, 0x2d, 0xe4, + 0xbf, 0xa8, 0x48, 0x0e, 0x62, 0xec, 0x45, 0x27, 0x9e, 0xf0, 0x4a, 0x26, 0x99, 0x2f, 0x85, 0x8b, + 0x0f, 0xf0, 0xa5, 0x70, 0xb7, 0x27, 0xe7, 0x7d, 0xc7, 0xf1, 0xe4, 0xdc, 0xfe, 0x3a, 0x3b, 0x39, + 0x75, 0xf8, 0x09, 0x08, 0x55, 0x57, 0xcc, 0x33, 0xd6, 0xee, 0x30, 0xb3, 0x44, 0xa3, 0x72, 0x84, + 0xab, 0x9f, 0xb7, 
0xe0, 0x5c, 0xc6, 0x57, 0x69, 0x92, 0xd6, 0x33, 0x30, 0xe4, 0xb4, 0xeb, 0xae, + 0x5a, 0xcb, 0x9a, 0x69, 0x72, 0x41, 0xc0, 0xb1, 0xa2, 0x40, 0x4b, 0x30, 0x49, 0xee, 0xb6, 0x5c, + 0x6e, 0xc8, 0xd5, 0x9d, 0x8f, 0x8b, 0xfc, 0xed, 0xe3, 0x4a, 0x12, 0x89, 0xd3, 0xf4, 0x2a, 0x38, + 0x51, 0x31, 0x37, 0x38, 0xd1, 0xdf, 0xb0, 0x60, 0x58, 0x3d, 0x69, 0x7d, 0xe0, 0xbd, 0xfd, 0x11, + 0xb3, 0xb7, 0x1f, 0xee, 0xd0, 0xdb, 0x39, 0xdd, 0xfc, 0xdb, 0x05, 0xd5, 0xde, 0x8a, 0x1f, 0x44, + 0x3d, 0x48, 0x70, 0xf7, 0xff, 0x70, 0xe2, 0x32, 0x0c, 0x3b, 0xad, 0x96, 0x44, 0x48, 0x0f, 0x38, + 0x16, 0x72, 0x3b, 0x06, 0x63, 0x9d, 0x46, 0xbd, 0xe3, 0x28, 0xe6, 0xbe, 0xe3, 0xa8, 0x03, 0x44, + 0x4e, 0xb0, 0x4d, 0x22, 0x0a, 0x13, 0x0e, 0xbb, 0xf9, 0xfb, 0x4d, 0x3b, 0x72, 0x1b, 0x73, 0xae, + 0x17, 0x85, 0x51, 0x30, 0x57, 0xf6, 0xa2, 0x1b, 0x01, 0xbf, 0x42, 0x6a, 0xe1, 0xbd, 0x14, 0x2f, + 0xac, 0xf1, 0x95, 0xe1, 0x1b, 0x58, 0x1d, 0xfd, 0xa6, 0x2b, 0xc5, 0xba, 0x80, 0x63, 0x45, 0x61, + 0x7f, 0x80, 0x9d, 0x3e, 0xac, 0x4f, 0x8f, 0x16, 0xda, 0xea, 0xa7, 0x46, 0xd4, 0x68, 0x30, 0xa3, + 0xe8, 0xb2, 0x1e, 0x40, 0xab, 0xf3, 0x66, 0x4f, 0x2b, 0xd6, 0x5f, 0x15, 0xc6, 0x51, 0xb6, 0xd0, + 0x27, 0x52, 0xee, 0x31, 0xcf, 0x76, 0x39, 0x35, 0x8e, 0xe0, 0x10, 0xc3, 0xf2, 0xef, 0xb0, 0xec, + 0x24, 0xe5, 0x8a, 0x58, 0x17, 0x5a, 0xfe, 0x1d, 0x81, 0xc0, 0x31, 0x0d, 0x15, 0xa6, 0xd4, 0x9f, + 0x70, 0x1a, 0xc5, 0x71, 0x68, 0x15, 0x75, 0x88, 0x35, 0x0a, 0x34, 0x2f, 0x14, 0x0a, 0xdc, 0x2e, + 0xf0, 0x70, 0x42, 0xa1, 0x20, 0xbb, 0x4b, 0xd3, 0x02, 0x5d, 0x86, 0x61, 0x95, 0x6d, 0xbd, 0xc2, + 0x33, 0x5f, 0x89, 0x69, 0xb6, 0x12, 0x83, 0xb1, 0x4e, 0x83, 0x36, 0x60, 0x3c, 0xe4, 0x7a, 0x36, + 0x15, 0x1c, 0x9c, 0xeb, 0x2b, 0xdf, 0xab, 0x1e, 0x13, 0x9b, 0xe8, 0x43, 0x06, 0xe2, 0xbb, 0x93, + 0x0c, 0xb1, 0x90, 0x64, 0x81, 0x5e, 0x85, 0xb1, 0x86, 0xef, 0xd4, 0x17, 0x9d, 0x86, 0xe3, 0xd5, + 0x58, 0xff, 0x0c, 0x99, 0x49, 0x7b, 0xaf, 0x1b, 0x58, 0x9c, 0xa0, 0xa6, 0xc2, 0x9b, 0x0e, 0x11, + 0x21, 0xc2, 0x1c, 0x6f, 0x9b, 0x84, 0x22, 0x77, 0x36, 0x13, 0xde, 0xae, 0xe7, 0xd0, 0xe0, 0xdc, + 0xd2, 0xe8, 0x25, 0x18, 0x91, 0x9f, 0xaf, 0x45, 0x24, 0x89, 0x9f, 0xc4, 0x68, 0x38, 0x6c, 0x50, + 0xa2, 0x10, 0x4e, 0xcb, 0xff, 0x1b, 0x81, 0xb3, 0xb5, 0xe5, 0xd6, 0xc4, 0x33, 0x7d, 0xfe, 0x76, + 0xf6, 0xc3, 0xf2, 0x81, 0xe7, 0x4a, 0x16, 0xd1, 0xe1, 0xc1, 0xec, 0x23, 0xa2, 0xd7, 0x32, 0xf1, + 0x38, 0x9b, 0x37, 0x5a, 0x83, 0xa9, 0x1d, 0xe2, 0x34, 0xa2, 0x9d, 0xa5, 0x1d, 0x52, 0xdb, 0x95, + 0x0b, 0x8e, 0xc5, 0x38, 0xd1, 0x9e, 0x8e, 0x5c, 0x4d, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x0d, 0x98, + 0x6e, 0xb5, 0x37, 0x1b, 0x6e, 0xb8, 0xb3, 0xee, 0x47, 0xcc, 0x09, 0x49, 0x25, 0x6e, 0x17, 0xc1, + 0x50, 0x54, 0x14, 0x99, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xde, 0x82, 0xd3, 0x89, 0x89, 0x20, + 0xc2, 0x41, 0x8c, 0xe5, 0xa7, 0x06, 0xa9, 0x66, 0x15, 0x10, 0x91, 0x55, 0xb2, 0x50, 0x38, 0xbb, + 0x0a, 0xf4, 0x32, 0x80, 0xdb, 0x5a, 0x75, 0x9a, 0x6e, 0x83, 0x5e, 0x15, 0xa7, 0xd8, 0x1c, 0xa1, + 0xd7, 0x06, 0x28, 0x57, 0x24, 0x94, 0xee, 0xcd, 0xe2, 0xdf, 0x3e, 0xd6, 0xa8, 0xd1, 0x75, 0x18, + 0x13, 0xff, 0xf6, 0xc5, 0x90, 0xf2, 0xa8, 0x24, 0x8f, 0xb3, 0x90, 0x52, 0x15, 0x1d, 0x73, 0x98, + 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x0d, 0xe7, 0x64, 0x9a, 0x37, 0x7d, 0x7e, 0xca, 0x31, 0x08, 0x59, + 0x3e, 0x8e, 0x21, 0xfe, 0x2a, 0x65, 0xa1, 0x13, 0x21, 0xee, 0xcc, 0x87, 0x9e, 0xeb, 0xfa, 0x34, + 0xe7, 0xef, 0x76, 0x4f, 0x73, 0x0f, 0x27, 0x7a, 0xae, 0x5f, 0x4f, 0x22, 0x71, 0x9a, 0x1e, 0xf9, + 0x70, 0xda, 0xf5, 0xb2, 0x66, 0xf5, 0x19, 0xc6, 0xe8, 0x83, 0xfc, 0xc9, 0x72, 0xe7, 0x19, 0x9d, + 0x89, 0xc7, 0xd9, 0x7c, 0xdf, 0x9e, 0xdf, 
0xdf, 0xef, 0x58, 0xb4, 0xb4, 0x26, 0x9d, 0xa3, 0x4f, + 0xc3, 0x88, 0xfe, 0x51, 0x42, 0xd2, 0xb8, 0x98, 0x2d, 0xbc, 0x6a, 0x7b, 0x02, 0x97, 0xed, 0xd5, + 0xba, 0xd7, 0x71, 0xd8, 0xe0, 0x88, 0x6a, 0x19, 0x0f, 0xfb, 0xe7, 0x7b, 0x93, 0x64, 0x7a, 0x77, + 0x7b, 0x23, 0x90, 0x3d, 0xdd, 0xd1, 0x75, 0x18, 0xaa, 0x35, 0x5c, 0xe2, 0x45, 0xe5, 0x4a, 0xa7, + 0xd0, 0x85, 0x4b, 0x82, 0x46, 0xac, 0x1f, 0x91, 0x5a, 0x83, 0xc3, 0xb0, 0xe2, 0x60, 0xff, 0x7a, + 0x01, 0x66, 0xbb, 0xe4, 0x69, 0x49, 0x98, 0xa1, 0xac, 0x9e, 0xcc, 0x50, 0x0b, 0x30, 0x1e, 0xff, + 0xd3, 0x35, 0x5c, 0xca, 0x93, 0xf5, 0x96, 0x89, 0xc6, 0x49, 0xfa, 0x9e, 0x1f, 0x25, 0xe8, 0x96, + 0xac, 0xbe, 0xae, 0xcf, 0x6a, 0x0c, 0x0b, 0x76, 0x7f, 0xef, 0xd7, 0xde, 0x5c, 0x6b, 0xa4, 0xfd, + 0xf5, 0x02, 0x9c, 0x56, 0x5d, 0xf8, 0x9d, 0xdb, 0x71, 0x37, 0xd3, 0x1d, 0x77, 0x0c, 0xb6, 0x5c, + 0xfb, 0x06, 0x0c, 0xf0, 0x58, 0x8c, 0x3d, 0x88, 0xdb, 0x8f, 0x99, 0x11, 0x9a, 0x95, 0x84, 0x67, + 0x44, 0x69, 0xfe, 0x01, 0x0b, 0xc6, 0x13, 0xaf, 0xdb, 0x10, 0xd6, 0x9e, 0x40, 0xdf, 0x8f, 0x48, + 0x9c, 0x25, 0x6c, 0x5f, 0x80, 0xbe, 0x1d, 0x3f, 0x8c, 0x92, 0x8e, 0x1e, 0x57, 0xfd, 0x30, 0xc2, + 0x0c, 0x63, 0xff, 0x9e, 0x05, 0xfd, 0x1b, 0x8e, 0xeb, 0x45, 0xd2, 0x28, 0x60, 0xe5, 0x18, 0x05, + 0x7a, 0xf9, 0x2e, 0xf4, 0x22, 0x0c, 0x90, 0xad, 0x2d, 0x52, 0x8b, 0xc4, 0xa8, 0xca, 0xf8, 0x11, + 0x03, 0x2b, 0x0c, 0x4a, 0xe5, 0x3f, 0x56, 0x19, 0xff, 0x8b, 0x05, 0x31, 0xba, 0x0d, 0xa5, 0xc8, + 0x6d, 0x92, 0x85, 0x7a, 0x5d, 0x98, 0xca, 0xef, 0x23, 0x06, 0xc6, 0x86, 0x64, 0x80, 0x63, 0x5e, + 0xf6, 0x17, 0x0b, 0x00, 0x71, 0x10, 0xab, 0x6e, 0x9f, 0xb8, 0x98, 0x32, 0xa2, 0x5e, 0xcc, 0x30, + 0xa2, 0xa2, 0x98, 0x61, 0x86, 0x05, 0x55, 0x75, 0x53, 0xb1, 0xa7, 0x6e, 0xea, 0x3b, 0x4a, 0x37, + 0x2d, 0xc1, 0x64, 0x1c, 0x84, 0xcb, 0x8c, 0x41, 0xc8, 0x8e, 0xce, 0x8d, 0x24, 0x12, 0xa7, 0xe9, + 0x6d, 0x02, 0x17, 0x54, 0x2c, 0x22, 0x71, 0xa2, 0x31, 0x3f, 0x70, 0xdd, 0x28, 0xdd, 0xa5, 0x9f, + 0x62, 0x2b, 0x71, 0x21, 0xd7, 0x4a, 0xfc, 0x93, 0x16, 0x9c, 0x4a, 0xd6, 0xc3, 0x1e, 0x4d, 0x7f, + 0xc1, 0x82, 0xd3, 0xcc, 0x56, 0xce, 0x6a, 0x4d, 0x5b, 0xe6, 0x5f, 0xe8, 0x18, 0x5f, 0x29, 0xa7, + 0xc5, 0x71, 0xa0, 0x92, 0xb5, 0x2c, 0xd6, 0x38, 0xbb, 0x46, 0xfb, 0xbf, 0xf7, 0xc1, 0x74, 0x5e, + 0x60, 0x26, 0xf6, 0x4c, 0xc4, 0xb9, 0x5b, 0xdd, 0x25, 0x77, 0x84, 0x33, 0x7e, 0xfc, 0x4c, 0x84, + 0x83, 0xb1, 0xc4, 0x27, 0x53, 0x6f, 0x14, 0x7a, 0x4c, 0xbd, 0xb1, 0x03, 0x93, 0x77, 0x76, 0x88, + 0x77, 0xd3, 0x0b, 0x9d, 0xc8, 0x0d, 0xb7, 0x5c, 0x66, 0x57, 0xe6, 0xf3, 0x46, 0xe6, 0xeb, 0x9d, + 0xbc, 0x9d, 0x24, 0x38, 0x3c, 0x98, 0x3d, 0x67, 0x00, 0xe2, 0x26, 0xf3, 0x8d, 0x04, 0xa7, 0x99, + 0xa6, 0x33, 0x97, 0xf4, 0x3d, 0xe0, 0xcc, 0x25, 0x4d, 0x57, 0x78, 0xa3, 0xc8, 0x37, 0x00, 0xec, + 0xc6, 0xb8, 0xa6, 0xa0, 0x58, 0xa3, 0x40, 0x9f, 0x04, 0xa4, 0x67, 0x66, 0x32, 0xe2, 0x62, 0x3e, + 0x7b, 0xef, 0x60, 0x16, 0xad, 0xa7, 0xb0, 0x87, 0x07, 0xb3, 0x53, 0x14, 0x5a, 0xf6, 0xe8, 0xcd, + 0x33, 0x0e, 0x26, 0x96, 0xc1, 0x08, 0xdd, 0x86, 0x09, 0x0a, 0x65, 0x2b, 0x4a, 0x06, 0xdd, 0xe4, + 0xb7, 0xc5, 0xa7, 0xef, 0x1d, 0xcc, 0x4e, 0xac, 0x27, 0x70, 0x79, 0xac, 0x53, 0x4c, 0xd0, 0xcb, + 0x30, 0x16, 0xcf, 0xab, 0x6b, 0x64, 0x9f, 0x07, 0xb9, 0x29, 0x71, 0x85, 0xf7, 0x9a, 0x81, 0xc1, + 0x09, 0x4a, 0xfb, 0x0b, 0x16, 0x9c, 0xcd, 0xcd, 0x1e, 0x8e, 0x2e, 0xc1, 0x90, 0xd3, 0x72, 0xb9, + 0xf9, 0x42, 0x1c, 0x35, 0x4c, 0x4d, 0x56, 0x29, 0x73, 0xe3, 0x85, 0xc2, 0xd2, 0x1d, 0x7e, 0xd7, + 0xf5, 0xea, 0xc9, 0x1d, 0xfe, 0x9a, 0xeb, 0xd5, 0x31, 0xc3, 0xa8, 0x23, 0xab, 0x98, 0xfb, 0x14, + 0xe1, 0xab, 0x74, 0xad, 0x66, 0xe4, 0x19, 0x3f, 0xd9, 0x66, 0xa0, 
0xa7, 0x75, 0x53, 0xa3, 0xf0, + 0x2a, 0xcc, 0x35, 0x33, 0x7e, 0xbf, 0x05, 0xe2, 0xe9, 0x72, 0x0f, 0x67, 0xf2, 0xc7, 0x61, 0x64, + 0x2f, 0x9d, 0xb5, 0xee, 0x42, 0xfe, 0x5b, 0x6e, 0x11, 0xed, 0x5b, 0x09, 0xda, 0x46, 0x86, 0x3a, + 0x83, 0x97, 0x5d, 0x07, 0x81, 0x5d, 0x26, 0xcc, 0xa0, 0xd0, 0xbd, 0x35, 0xcf, 0x01, 0xd4, 0x19, + 0x2d, 0x4b, 0x65, 0x5b, 0x30, 0x25, 0xae, 0x65, 0x85, 0xc1, 0x1a, 0x95, 0xfd, 0x2f, 0x0a, 0x30, + 0x2c, 0xb3, 0xa4, 0xb5, 0xbd, 0x5e, 0xd4, 0x7e, 0x47, 0x4a, 0x9b, 0x8c, 0xe6, 0xa1, 0xc4, 0xf4, + 0xd2, 0x95, 0x58, 0x5b, 0xaa, 0xb4, 0x42, 0x6b, 0x12, 0x81, 0x63, 0x1a, 0xba, 0x3b, 0x86, 0xed, + 0x4d, 0x46, 0x9e, 0x78, 0x68, 0x5b, 0xe5, 0x60, 0x2c, 0xf1, 0xe8, 0xa3, 0x30, 0xc1, 0xcb, 0x05, + 0x7e, 0xcb, 0xd9, 0xe6, 0xb6, 0xac, 0x7e, 0x15, 0xbd, 0x64, 0x62, 0x2d, 0x81, 0x3b, 0x3c, 0x98, + 0x3d, 0x95, 0x84, 0x31, 0x23, 0x6d, 0x8a, 0x0b, 0x73, 0x59, 0xe3, 0x95, 0xd0, 0x5d, 0x3d, 0xe5, + 0xe9, 0x16, 0xa3, 0xb0, 0x4e, 0x67, 0x7f, 0x1a, 0x50, 0x3a, 0x5f, 0x1c, 0x7a, 0x8d, 0xbb, 0x3c, + 0xbb, 0x01, 0xa9, 0x77, 0x32, 0xda, 0xea, 0x31, 0x3a, 0xe4, 0x1b, 0x39, 0x5e, 0x0a, 0xab, 0xf2, + 0xf6, 0xff, 0x57, 0x84, 0x89, 0x64, 0x54, 0x00, 0x74, 0x15, 0x06, 0xb8, 0x48, 0x29, 0xd8, 0x77, + 0xf0, 0x09, 0xd2, 0x62, 0x09, 0xb0, 0xc3, 0x55, 0x48, 0xa5, 0xa2, 0x3c, 0x7a, 0x03, 0x86, 0xeb, + 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0x17, 0x2a, 0x65, 0x31, 0x9d, 0x33, 0x15, 0x15, 0xcb, 0x31, + 0x99, 0x1e, 0x9f, 0x80, 0xd9, 0xbf, 0x63, 0x14, 0xd6, 0xd9, 0xa1, 0x0d, 0x96, 0x64, 0x62, 0xcb, + 0xdd, 0x5e, 0x73, 0x5a, 0x9d, 0xde, 0xbf, 0x2c, 0x49, 0x22, 0x8d, 0xf3, 0xa8, 0xc8, 0x44, 0xc1, + 0x11, 0x38, 0x66, 0x84, 0x3e, 0x0b, 0x53, 0x61, 0x8e, 0xe9, 0x24, 0x2f, 0x7d, 0x68, 0x27, 0x6b, + 0xc2, 0xe2, 0x43, 0xf7, 0x0e, 0x66, 0xa7, 0xb2, 0x8c, 0x2c, 0x59, 0xd5, 0xd8, 0x5f, 0x3a, 0x05, + 0xc6, 0x22, 0x36, 0xb2, 0x49, 0x5b, 0xc7, 0x94, 0x4d, 0x1a, 0xc3, 0x10, 0x69, 0xb6, 0xa2, 0xfd, + 0x65, 0x37, 0x10, 0x63, 0x92, 0xc9, 0x73, 0x45, 0xd0, 0xa4, 0x79, 0x4a, 0x0c, 0x56, 0x7c, 0xb2, + 0x53, 0x7e, 0x17, 0xbf, 0x85, 0x29, 0xbf, 0xfb, 0x4e, 0x30, 0xe5, 0xf7, 0x3a, 0x0c, 0x6e, 0xbb, + 0x11, 0x26, 0x2d, 0x5f, 0x5c, 0xe6, 0x32, 0xe7, 0xe1, 0x15, 0x4e, 0x92, 0x4e, 0x2e, 0x2b, 0x10, + 0x58, 0x32, 0x41, 0xaf, 0xa9, 0x15, 0x38, 0x90, 0xaf, 0x70, 0x49, 0x3b, 0xaf, 0x64, 0xae, 0x41, + 0x91, 0xd8, 0x7b, 0xf0, 0x7e, 0x13, 0x7b, 0xaf, 0xca, 0x74, 0xdc, 0x43, 0xf9, 0x8f, 0xd5, 0x58, + 0xb6, 0xed, 0x2e, 0x49, 0xb8, 0x6f, 0xe9, 0x29, 0xcc, 0x4b, 0xf9, 0x3b, 0x81, 0xca, 0x4e, 0xde, + 0x63, 0xe2, 0xf2, 0xef, 0xb7, 0xe0, 0x74, 0x2b, 0x2b, 0x9b, 0xbf, 0xf0, 0xf3, 0x78, 0xb1, 0x97, + 0x9c, 0xaf, 0xa9, 0xf4, 0xff, 0x5c, 0x47, 0x9a, 0x49, 0x86, 0xb3, 0xab, 0xa3, 0x1d, 0x1d, 0x6c, + 0xd6, 0x85, 0xbf, 0xc1, 0x63, 0x39, 0x19, 0xd0, 0x3b, 0xe4, 0x3d, 0xdf, 0xc8, 0xc8, 0xb6, 0xfd, + 0x78, 0x5e, 0xb6, 0xed, 0x9e, 0x73, 0x6c, 0xbf, 0xa6, 0x72, 0x9f, 0x8f, 0xe6, 0x4f, 0x25, 0x9e, + 0xd9, 0xbc, 0x6b, 0xc6, 0xf3, 0xd7, 0x54, 0xc6, 0xf3, 0x0e, 0x61, 0xb5, 0x79, 0x3e, 0xf3, 0xae, + 0x79, 0xce, 0xb5, 0x5c, 0xe5, 0xe3, 0xc7, 0x93, 0xab, 0xdc, 0x38, 0x6a, 0x78, 0xba, 0xec, 0xa7, + 0xbb, 0x1c, 0x35, 0x06, 0xdf, 0xce, 0x87, 0x0d, 0xcf, 0xcb, 0x3e, 0x79, 0x5f, 0x79, 0xd9, 0x6f, + 0xe9, 0x79, 0xce, 0x51, 0x97, 0x44, 0xde, 0x94, 0xa8, 0xc7, 0xec, 0xe6, 0xb7, 0xf4, 0x03, 0x70, + 0x2a, 0x9f, 0xaf, 0x3a, 0xe7, 0xd2, 0x7c, 0x33, 0x8f, 0xc0, 0x54, 0xd6, 0xf4, 0x53, 0x27, 0x93, + 0x35, 0xfd, 0xf4, 0xb1, 0x67, 0x4d, 0x3f, 0x73, 0x02, 0x59, 0xd3, 0x1f, 0x3a, 0xc1, 0xac, 0xe9, + 0xb7, 0x98, 0x73, 0x14, 0x0f, 0x00, 0x25, 0xc2, 0x80, 0x3f, 0x95, 0x13, 0x3f, 0x2d, 0x1d, 
0x25, + 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x36, 0xf6, 0xe9, 0x07, 0x90, 0x8d, 0x7d, 0x3d, + 0xce, 0xc6, 0x7e, 0x36, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0x60, 0xbf, 0xa5, 0xe7, 0x4e, + 0x7f, 0xb8, 0x83, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x21, 0x63, 0xfa, 0xab, 0x3c, 0x63, 0xfa, 0x23, + 0xf9, 0x3b, 0x79, 0xf2, 0xb8, 0x33, 0xf2, 0xa4, 0xd3, 0x76, 0xa9, 0x00, 0xaa, 0x2c, 0xe0, 0x79, + 0x4e, 0xbb, 0x54, 0x04, 0xd6, 0x74, 0xbb, 0x14, 0x0a, 0xc7, 0xac, 0xec, 0x1f, 0x2c, 0xc0, 0xf9, + 0xce, 0xeb, 0x2d, 0xd6, 0x92, 0x57, 0x62, 0x87, 0x80, 0x84, 0x96, 0x9c, 0xdf, 0xd9, 0x62, 0xaa, + 0x9e, 0xe3, 0x41, 0x5e, 0x81, 0x49, 0xf5, 0x0e, 0xa7, 0xe1, 0xd6, 0xf6, 0xd7, 0xe3, 0x6b, 0xb2, + 0x8a, 0x9c, 0x50, 0x4d, 0x12, 0xe0, 0x74, 0x19, 0xb4, 0x00, 0xe3, 0x06, 0xb0, 0xbc, 0x2c, 0xee, + 0x66, 0x71, 0x88, 0x6d, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0x97, 0x2d, 0x78, 0x28, 0x27, 0xdd, 0x68, + 0xcf, 0xe1, 0x0e, 0xb7, 0x60, 0xbc, 0x65, 0x16, 0xed, 0x12, 0xa1, 0xd5, 0x48, 0x6a, 0xaa, 0xda, + 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0b, 0x70, 0xae, 0xa3, 0x63, 0x29, 0xc2, 0x70, 0x66, + 0xbb, 0x19, 0x3a, 0x4b, 0x01, 0xa9, 0x13, 0x2f, 0x72, 0x9d, 0x46, 0xb5, 0x45, 0x6a, 0x9a, 0x9d, + 0x83, 0x79, 0x68, 0x5e, 0x59, 0xab, 0x2e, 0xa4, 0x29, 0x70, 0x4e, 0x49, 0xb4, 0x0a, 0x28, 0x8d, + 0x11, 0x23, 0xcc, 0x42, 0xe7, 0xa7, 0xf9, 0xe1, 0x8c, 0x12, 0xe8, 0x03, 0x30, 0xaa, 0x1c, 0x56, + 0xb5, 0x11, 0x67, 0x1b, 0x3b, 0xd6, 0x11, 0xd8, 0xa4, 0x43, 0x97, 0x79, 0xee, 0x05, 0x91, 0xa5, + 0x43, 0x18, 0x45, 0xc6, 0x65, 0x62, 0x05, 0x01, 0xc6, 0x3a, 0xcd, 0xe2, 0x4b, 0xbf, 0xf1, 0x07, + 0xe7, 0xdf, 0xf3, 0x5b, 0x7f, 0x70, 0xfe, 0x3d, 0xbf, 0xfb, 0x07, 0xe7, 0xdf, 0xf3, 0xdd, 0xf7, + 0xce, 0x5b, 0xbf, 0x71, 0xef, 0xbc, 0xf5, 0x5b, 0xf7, 0xce, 0x5b, 0xbf, 0x7b, 0xef, 0xbc, 0xf5, + 0xfb, 0xf7, 0xce, 0x5b, 0x5f, 0xfc, 0xc3, 0xf3, 0xef, 0xf9, 0x38, 0x8a, 0x03, 0x88, 0xce, 0xd3, + 0xd1, 0x99, 0xdf, 0xbb, 0xfc, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc2, 0xee, 0x75, 0x5f, 0xef, 0x0c, 0x01, 0x00, } @@ -65016,7 +65016,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -65274,7 +65274,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex default: diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto index 854bcdeba029..da7b1ef1a1d0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto @@ -220,7 +220,6 @@ message CSIPersistentVolumeSource { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -1049,11 +1048,8 @@ message EmptyDirVolumeSource { // +structType=atomic message EndpointAddress { // The IP of this endpoint. 
- // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -2484,6 +2480,10 @@ message NodeStatus { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4514,7 +4514,8 @@ message ResourceRequirements { // // This field is immutable. // - // +listType=set + // +listType=map + // +listMapKey=name // +featureGate=DynamicResourceAllocation // +optional repeated ResourceClaim claims = 3; diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/toleration.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/toleration.go index 9341abf89192..e803d518b5cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/toleration.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/toleration.go @@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, // -// otherwise taint effect must equal to toleration.effect. -// -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// 1. Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index 87230fd91812..341560b2c848 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -1826,7 +1826,6 @@ type CSIPersistentVolumeSource struct { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. 
If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -2322,7 +2321,8 @@ type ResourceRequirements struct { // // This field is immutable. // - // +listType=set + // +listType=map + // +listMapKey=name // +featureGate=DynamicResourceAllocation // +optional Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` @@ -2721,6 +2721,10 @@ const ( // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -4368,34 +4372,47 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes how nodes distribute service traffic they +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they // receive on the ClusterIP. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same // node as the client pod (dropping the traffic if there are no local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// ServiceExternalTrafficPolicyType describes how nodes distribute service traffic they +// for backwards compat +// +enum +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, -// and LoadBalancer IPs). +// and LoadBalancer IPs. // +enum -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by // routing only to endpoints on the same node as the traffic was received on // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy + +const ( + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) // These are the valid conditions of a service. 
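The hunk above renames ServiceExternalTrafficPolicyType and ServiceInternalTrafficPolicyType to ServiceExternalTrafficPolicy and ServiceInternalTrafficPolicy while keeping the old identifiers as type and constant aliases. A minimal sketch of a hypothetical downstream caller (not part of this diff; variable names are illustrative), assuming only the aliases shown above, illustrating why pre-rename code keeps compiling unchanged:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Declared with the legacy alias name and legacy constant; after the rename
	// this is the same type as v1.ServiceExternalTrafficPolicy.
	var legacy v1.ServiceExternalTrafficPolicyType = v1.ServiceExternalTrafficPolicyTypeLocal

	// Declared with the new name; direct assignment needs no conversion because
	// ServiceExternalTrafficPolicyType is a type alias, not a distinct type.
	var current v1.ServiceExternalTrafficPolicy = legacy

	fmt.Println(legacy == current, current == v1.ServiceExternalTrafficPolicyLocal) // prints: true true
}

The same pattern applies to ServiceInternalTrafficPolicyType, which aliases ServiceInternalTrafficPolicy.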
@@ -4628,7 +4645,7 @@ type ServiceSpec struct { // a NodePort from within the cluster may need to take traffic policy into account // when picking a node. // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4725,7 +4742,7 @@ type ServiceSpec struct { // "Cluster", uses the standard behavior of routing to all endpoints evenly // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` } // ServicePort contains information on service's port. @@ -4946,11 +4963,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -5205,6 +5219,10 @@ type NodeStatus struct { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 6c6fe2e0064f..e3f73cf2f0a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_AWSElasticBlockStoreVolumeSource = map[string]string{ @@ -126,7 +126,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an beta field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is an alpha field and requires enabling CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } @@ -502,7 +502,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -1213,7 +1213,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 2bf1c8ad6487..9a3b46fdc35b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -5517,7 +5517,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/generated.proto index 9cbe46394a36..2be04f91f279 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/generated.proto @@ -115,9 +115,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -125,17 +124,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. 
optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -183,7 +182,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types.go index 2df80c3d5c26..ba4f05e7ff07 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types.go @@ -29,9 +29,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +42,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +65,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -77,8 +83,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -86,6 +94,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. 
// +optional @@ -104,9 +113,11 @@ type Endpoint struct { // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. // +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -154,24 +165,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -186,9 +199,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 746408b66853..3caa8673f0f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_Endpoint = map[string]string{ @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 2979e64a717c..8b6c360b0e6d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -118,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. 
All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -128,17 +127,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -186,7 +185,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go index 7a02bead595c..f09f7f320cd2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -80,8 +86,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. 
Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -89,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -108,10 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -159,24 +171,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -195,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
// +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index e1c974b3978e..b1d4c306ccd9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63bb722..44ac0c3bb644 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a83c54..e6c28a4f8c0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 333142b3e3e8..863ebbc4a722 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_cdc93917efc28165, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_cdc93917efc28165, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} 
} func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_cdc93917efc28165, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_cdc93917efc28165, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_cdc93917efc28165, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_cdc93917efc28165, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_cdc93917efc28165, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_cdc93917efc28165, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_cdc93917efc28165, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_cdc93917efc28165, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_cdc93917efc28165, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_cdc93917efc28165, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_cdc93917efc28165, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_cdc93917efc28165, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_cdc93917efc28165, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil 
-} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_cdc93917efc28165, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_cdc93917efc28165, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_cdc93917efc28165, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{24} + return fileDescriptor_cdc93917efc28165, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +584,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_cdc93917efc28165, []int{19} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +612,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_cdc93917efc28165, []int{20} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -808,7 +640,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_cdc93917efc28165, []int{21} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +668,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_cdc93917efc28165, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_cdc93917efc28165, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_cdc93917efc28165, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_cdc93917efc28165, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_cdc93917efc28165, []int{26} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_cdc93917efc28165, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_cdc93917efc28165, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = 
NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return fileDescriptor_cdc93917efc28165, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_cdc93917efc28165, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_cdc93917efc28165, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1004,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } func (*NetworkPolicyStatus) ProtoMessage() {} func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_cdc93917efc28165, []int{34} } func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,94 +1029,10 @@ func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy 
proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{35} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1060,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{36} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetSpec 
proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1172,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1200,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1505,15 +1253,15 @@ func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} } -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1521,27 +1269,27 @@ func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([ } return b[:n], nil } -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) } -func (m *RunAsGroupStrategyOptions) XXX_Size() int { +func (m *Scale) XXX_Size() int { return m.Size() } -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} 
} -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} } -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1549,126 +1297,14 @@ func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([] } return b[:n], nil } -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) } -func (m *RunAsUserStrategyOptions) XXX_Size() int { +func (m *ScaleSpec) XXX_Size() int { return m.Size() } -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{56} -} -func (m *Scale) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{57} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) } var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo @@ -1676,7 +1312,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{58} + return fileDescriptor_cdc93917efc28165, []int{45} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1701,38 +1337,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{59} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), 
"k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1747,11 +1352,8 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") @@ -1772,9 +1374,6 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1783,15 +1382,10 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -1799,344 +1393,188 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3920 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5c, 0x4b, 0x6c, 0x1c, 0xd9, - 0x75, 0x55, 
0x75, 0x93, 0xec, 0xe6, 0xa5, 0xf8, 0x7b, 0xa4, 0xc8, 0x1e, 0xca, 0x62, 0xcb, 0x35, - 0xc8, 0x44, 0x33, 0xd1, 0x74, 0x5b, 0x1c, 0x49, 0x1e, 0x8f, 0x10, 0x7b, 0xd8, 0xfc, 0x48, 0xb4, - 0xf9, 0xe9, 0x79, 0x4d, 0xca, 0xc6, 0x20, 0xe3, 0xb8, 0x58, 0xfd, 0xd8, 0xac, 0x61, 0x75, 0x55, - 0xa5, 0xaa, 0x9a, 0x66, 0x07, 0x59, 0x24, 0x48, 0x36, 0x06, 0x02, 0x24, 0x1b, 0x27, 0x59, 0x66, - 0x60, 0x20, 0xbb, 0x20, 0xcb, 0x64, 0xe1, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0x06, 0xb2, - 0x88, 0x57, 0x44, 0x86, 0x5e, 0x05, 0x59, 0x65, 0x17, 0x68, 0x15, 0xbc, 0x4f, 0xfd, 0xab, 0xd8, - 0xd5, 0x8c, 0x44, 0xc4, 0x81, 0x57, 0x62, 0xbd, 0x7b, 0xef, 0x79, 0xbf, 0xfb, 0xee, 0x3d, 0xef, - 0xd3, 0x82, 0xcd, 0x93, 0xf7, 0x9d, 0x9a, 0x66, 0xd6, 0x4f, 0x7a, 0x87, 0xc4, 0x36, 0x88, 0x4b, - 0x9c, 0xfa, 0x29, 0x31, 0xda, 0xa6, 0x5d, 0x17, 0x02, 0xc5, 0xd2, 0xea, 0xe4, 0xcc, 0x25, 0x86, - 0xa3, 0x99, 0x86, 0x53, 0x3f, 0x7d, 0x70, 0x48, 0x5c, 0xe5, 0x41, 0xbd, 0x43, 0x0c, 0x62, 0x2b, - 0x2e, 0x69, 0xd7, 0x2c, 0xdb, 0x74, 0x4d, 0x74, 0x87, 0xab, 0xd7, 0x14, 0x4b, 0xab, 0x05, 0xea, - 0x35, 0xa1, 0xbe, 0xf4, 0x6e, 0x47, 0x73, 0x8f, 0x7b, 0x87, 0x35, 0xd5, 0xec, 0xd6, 0x3b, 0x66, - 0xc7, 0xac, 0x33, 0xab, 0xc3, 0xde, 0x11, 0xfb, 0x62, 0x1f, 0xec, 0x2f, 0x8e, 0xb6, 0x24, 0x87, - 0x2a, 0x57, 0x4d, 0x9b, 0xd4, 0x4f, 0x13, 0x35, 0x2e, 0x3d, 0x0c, 0x74, 0xba, 0x8a, 0x7a, 0xac, - 0x19, 0xc4, 0xee, 0xd7, 0xad, 0x93, 0x0e, 0x2d, 0x70, 0xea, 0x5d, 0xe2, 0x2a, 0x69, 0x56, 0xf5, - 0x2c, 0x2b, 0xbb, 0x67, 0xb8, 0x5a, 0x97, 0x24, 0x0c, 0x1e, 0x0f, 0x32, 0x70, 0xd4, 0x63, 0xd2, - 0x55, 0x12, 0x76, 0xef, 0x65, 0xd9, 0xf5, 0x5c, 0x4d, 0xaf, 0x6b, 0x86, 0xeb, 0xb8, 0x76, 0xdc, - 0x48, 0x7e, 0x08, 0x33, 0xab, 0xba, 0x6e, 0x7e, 0x97, 0xb4, 0xd7, 0x5a, 0x5b, 0xeb, 0xb6, 0x76, - 0x4a, 0x6c, 0x74, 0x17, 0x46, 0x0c, 0xa5, 0x4b, 0x2a, 0xd2, 0x5d, 0xe9, 0xde, 0x78, 0xe3, 0xe6, - 0x8b, 0xf3, 0xea, 0x8d, 0x8b, 0xf3, 0xea, 0xc8, 0xae, 0xd2, 0x25, 0x98, 0x49, 0xe4, 0x27, 0x30, - 0x2b, 0xac, 0x36, 0x75, 0x72, 0xf6, 0xdc, 0xd4, 0x7b, 0x5d, 0x82, 0xde, 0x82, 0xb1, 0x36, 0x03, - 0x10, 0x86, 0x53, 0xc2, 0x70, 0x8c, 0xc3, 0x62, 0x21, 0x95, 0x1d, 0x98, 0x16, 0xc6, 0xcf, 0x4c, - 0xc7, 0x6d, 0x2a, 0xee, 0x31, 0x5a, 0x01, 0xb0, 0x14, 0xf7, 0xb8, 0x69, 0x93, 0x23, 0xed, 0x4c, - 0x98, 0x23, 0x61, 0x0e, 0x4d, 0x5f, 0x82, 0x43, 0x5a, 0xe8, 0x3e, 0x94, 0x6d, 0xa2, 0xb4, 0xf7, - 0x0c, 0xbd, 0x5f, 0x29, 0xdc, 0x95, 0xee, 0x95, 0x1b, 0x33, 0xc2, 0xa2, 0x8c, 0x45, 0x39, 0xf6, - 0x35, 0xe4, 0xef, 0x17, 0x60, 0x7c, 0x5d, 0x21, 0x5d, 0xd3, 0x68, 0x11, 0x17, 0x7d, 0x07, 0xca, - 0x74, 0xba, 0xda, 0x8a, 0xab, 0xb0, 0xda, 0x26, 0x56, 0xbe, 0x54, 0x0b, 0xdc, 0xc9, 0x1f, 0xbd, - 0x9a, 0x75, 0xd2, 0xa1, 0x05, 0x4e, 0x8d, 0x6a, 0xd7, 0x4e, 0x1f, 0xd4, 0xf6, 0x0e, 0x3f, 0x25, - 0xaa, 0xbb, 0x43, 0x5c, 0x25, 0x68, 0x5f, 0x50, 0x86, 0x7d, 0x54, 0xb4, 0x0b, 0x23, 0x8e, 0x45, - 0x54, 0xd6, 0xb2, 0x89, 0x95, 0xfb, 0xb5, 0x4b, 0x9d, 0xb5, 0xe6, 0xb7, 0xac, 0x65, 0x11, 0x35, - 0x18, 0x71, 0xfa, 0x85, 0x19, 0x0e, 0x7a, 0x0e, 0x63, 0x8e, 0xab, 0xb8, 0x3d, 0xa7, 0x52, 0x64, - 0x88, 0xb5, 0xdc, 0x88, 0xcc, 0x2a, 0x98, 0x0c, 0xfe, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x05, 0x40, - 0xbe, 0xee, 0x9a, 0x69, 0xb4, 0x35, 0x57, 0x33, 0x0d, 0xf4, 0x01, 0x8c, 0xb8, 0x7d, 0xcb, 0x73, - 0x81, 0xb7, 0xbc, 0x06, 0xed, 0xf7, 0x2d, 0xf2, 0xf2, 0xbc, 0xba, 0x90, 0xb4, 0xa0, 0x12, 0xcc, - 0x6c, 0xd0, 0xb6, 0xdf, 0xd4, 0x02, 0xb3, 0x7e, 0x18, 0xad, 0xfa, 0xe5, 0x79, 0x35, 0x65, 0xb1, - 0xd5, 0x7c, 0xa4, 0x68, 0x03, 0xd1, 0x29, 0x20, 0x5d, 0x71, 0xdc, 0x7d, 0x5b, 0x31, 0x1c, 0x5e, - 0x93, 0xd6, 0x25, 0x62, 0x10, 0xde, 
0xc9, 0x37, 0x69, 0xd4, 0xa2, 0xb1, 0x24, 0x5a, 0x81, 0xb6, - 0x13, 0x68, 0x38, 0xa5, 0x06, 0xea, 0xcd, 0x36, 0x51, 0x1c, 0xd3, 0xa8, 0x8c, 0x44, 0xbd, 0x19, - 0xb3, 0x52, 0x2c, 0xa4, 0xe8, 0x6d, 0x28, 0x75, 0x89, 0xe3, 0x28, 0x1d, 0x52, 0x19, 0x65, 0x8a, - 0xd3, 0x42, 0xb1, 0xb4, 0xc3, 0x8b, 0xb1, 0x27, 0x97, 0x7f, 0x28, 0xc1, 0xa4, 0x3f, 0x72, 0xdb, - 0x9a, 0xe3, 0xa2, 0xdf, 0x48, 0xf8, 0x61, 0x2d, 0x5f, 0x97, 0xa8, 0x35, 0xf3, 0x42, 0xdf, 0xe7, - 0xbd, 0x92, 0x90, 0x0f, 0xee, 0xc0, 0xa8, 0xe6, 0x92, 0x2e, 0x9d, 0x87, 0xe2, 0xbd, 0x89, 0x95, - 0x7b, 0x79, 0x5d, 0xa6, 0x31, 0x29, 0x40, 0x47, 0xb7, 0xa8, 0x39, 0xe6, 0x28, 0xf2, 0x9f, 0x8c, - 0x84, 0x9a, 0x4f, 0x5d, 0x13, 0x7d, 0x02, 0x65, 0x87, 0xe8, 0x44, 0x75, 0x4d, 0x5b, 0x34, 0xff, - 0xbd, 0x9c, 0xcd, 0x57, 0x0e, 0x89, 0xde, 0x12, 0xa6, 0x8d, 0x9b, 0xb4, 0xfd, 0xde, 0x17, 0xf6, - 0x21, 0xd1, 0x47, 0x50, 0x76, 0x49, 0xd7, 0xd2, 0x15, 0x97, 0x88, 0x75, 0xf4, 0x66, 0xb8, 0x0b, - 0xd4, 0x73, 0x28, 0x58, 0xd3, 0x6c, 0xef, 0x0b, 0x35, 0xb6, 0x7c, 0xfc, 0x21, 0xf1, 0x4a, 0xb1, - 0x0f, 0x83, 0x4e, 0x61, 0xaa, 0x67, 0xb5, 0xa9, 0xa6, 0x4b, 0xa3, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, - 0x71, 0xde, 0xb1, 0x39, 0x88, 0x58, 0x37, 0x16, 0x44, 0x5d, 0x53, 0xd1, 0x72, 0x1c, 0xab, 0x05, - 0xad, 0xc2, 0x74, 0x57, 0x33, 0x68, 0x5c, 0xea, 0xb7, 0x88, 0x6a, 0x1a, 0x6d, 0x87, 0xb9, 0xd5, - 0x68, 0x63, 0x51, 0x00, 0x4c, 0xef, 0x44, 0xc5, 0x38, 0xae, 0x8f, 0xbe, 0x0e, 0xc8, 0xeb, 0xc6, - 0x53, 0x1e, 0xc4, 0x35, 0xd3, 0x60, 0x3e, 0x57, 0x0c, 0x9c, 0x7b, 0x3f, 0xa1, 0x81, 0x53, 0xac, - 0xd0, 0x36, 0xcc, 0xdb, 0xe4, 0x54, 0xa3, 0x7d, 0x7c, 0xa6, 0x39, 0xae, 0x69, 0xf7, 0xb7, 0xb5, - 0xae, 0xe6, 0x56, 0xc6, 0x58, 0x9b, 0x2a, 0x17, 0xe7, 0xd5, 0x79, 0x9c, 0x22, 0xc7, 0xa9, 0x56, - 0xf2, 0x9f, 0x8e, 0xc1, 0x74, 0x2c, 0xde, 0xa0, 0xe7, 0xb0, 0xa0, 0xf6, 0x6c, 0x9b, 0x18, 0xee, - 0x6e, 0xaf, 0x7b, 0x48, 0xec, 0x96, 0x7a, 0x4c, 0xda, 0x3d, 0x9d, 0xb4, 0x99, 0xa3, 0x8c, 0x36, - 0x96, 0x45, 0x8b, 0x17, 0xd6, 0x52, 0xb5, 0x70, 0x86, 0x35, 0x1d, 0x05, 0x83, 0x15, 0xed, 0x68, - 0x8e, 0xe3, 0x63, 0x16, 0x18, 0xa6, 0x3f, 0x0a, 0xbb, 0x09, 0x0d, 0x9c, 0x62, 0x45, 0xdb, 0xd8, - 0x26, 0x8e, 0x66, 0x93, 0x76, 0xbc, 0x8d, 0xc5, 0x68, 0x1b, 0xd7, 0x53, 0xb5, 0x70, 0x86, 0x35, - 0x7a, 0x04, 0x13, 0xbc, 0x36, 0x36, 0x7f, 0x62, 0xa2, 0xe7, 0x04, 0xd8, 0xc4, 0x6e, 0x20, 0xc2, - 0x61, 0x3d, 0xda, 0x35, 0xf3, 0xd0, 0x21, 0xf6, 0x29, 0x69, 0x67, 0x4f, 0xf0, 0x5e, 0x42, 0x03, - 0xa7, 0x58, 0xd1, 0xae, 0x71, 0x0f, 0x4c, 0x74, 0x6d, 0x2c, 0xda, 0xb5, 0x83, 0x54, 0x2d, 0x9c, - 0x61, 0x4d, 0xfd, 0x98, 0x37, 0x79, 0xf5, 0x54, 0xd1, 0x74, 0xe5, 0x50, 0x27, 0x95, 0x52, 0xd4, - 0x8f, 0x77, 0xa3, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x61, 0x96, 0x17, 0x1d, 0x18, 0x8a, 0x0f, 0x52, - 0x66, 0x20, 0x6f, 0x08, 0x90, 0xd9, 0xdd, 0xb8, 0x02, 0x4e, 0xda, 0xa0, 0x0f, 0x60, 0x4a, 0x35, - 0x75, 0x9d, 0xf9, 0xe3, 0x9a, 0xd9, 0x33, 0xdc, 0xca, 0x38, 0x43, 0x41, 0x74, 0x3d, 0xae, 0x45, - 0x24, 0x38, 0xa6, 0x89, 0x08, 0x80, 0xea, 0x25, 0x1c, 0xa7, 0x02, 0x2c, 0x3e, 0x3e, 0xc8, 0x1b, - 0x03, 0xfc, 0x54, 0x15, 0x70, 0x00, 0xbf, 0xc8, 0xc1, 0x21, 0x60, 0xf9, 0x9f, 0x24, 0x58, 0xcc, - 0x08, 0x1d, 0xe8, 0x6b, 0x91, 0x14, 0xfb, 0x6b, 0xb1, 0x14, 0x7b, 0x3b, 0xc3, 0x2c, 0x94, 0x67, - 0x0d, 0x98, 0xb4, 0x69, 0xaf, 0x8c, 0x0e, 0x57, 0x11, 0x31, 0xf2, 0xd1, 0x80, 0x6e, 0xe0, 0xb0, - 0x4d, 0x10, 0xf3, 0x67, 0x2f, 0xce, 0xab, 0x93, 0x11, 0x19, 0x8e, 0xc2, 0xcb, 0x7f, 0x56, 0x00, - 0x58, 0x27, 0x96, 0x6e, 0xf6, 0xbb, 0xc4, 0xb8, 0x0e, 0x0e, 0xb5, 0x17, 0xe1, 0x50, 0xef, 0x0e, - 0x9a, 0x1e, 0xbf, 0x69, 0x99, 0x24, 0xea, 0x9b, 0x31, 0x12, 
0x55, 0xcf, 0x0f, 0x79, 0x39, 0x8b, - 0xfa, 0xb7, 0x22, 0xcc, 0x05, 0xca, 0x01, 0x8d, 0x7a, 0x12, 0x99, 0xe3, 0x5f, 0x8d, 0xcd, 0xf1, - 0x62, 0x8a, 0xc9, 0x6b, 0xe3, 0x51, 0x9f, 0xc2, 0x14, 0x65, 0x39, 0x7c, 0x2e, 0x19, 0x87, 0x1a, - 0x1b, 0x9a, 0x43, 0xf9, 0xd9, 0x6e, 0x3b, 0x82, 0x84, 0x63, 0xc8, 0x19, 0x9c, 0xad, 0xf4, 0x8b, - 0xc8, 0xd9, 0x7e, 0x24, 0xc1, 0x54, 0x30, 0x4d, 0xd7, 0x40, 0xda, 0x76, 0xa3, 0xa4, 0xed, 0xed, - 0xdc, 0x2e, 0x9a, 0xc1, 0xda, 0xfe, 0x9b, 0x12, 0x7c, 0x5f, 0x89, 0x2e, 0xf0, 0x43, 0x45, 0x3d, - 0x19, 0xbc, 0xc7, 0x43, 0xdf, 0x97, 0x00, 0x89, 0x2c, 0xb0, 0x6a, 0x18, 0xa6, 0xab, 0xf0, 0x58, - 0xc9, 0x9b, 0xb5, 0x95, 0xbb, 0x59, 0x5e, 0x8d, 0xb5, 0x83, 0x04, 0xd6, 0x86, 0xe1, 0xda, 0xfd, - 0x60, 0x92, 0x93, 0x0a, 0x38, 0xa5, 0x01, 0x48, 0x01, 0xb0, 0x05, 0xe6, 0xbe, 0x29, 0x16, 0xf2, - 0xbb, 0x39, 0x62, 0x1e, 0x35, 0x58, 0x33, 0x8d, 0x23, 0xad, 0x13, 0x84, 0x1d, 0xec, 0x03, 0xe1, - 0x10, 0xe8, 0xd2, 0x06, 0x2c, 0x66, 0xb4, 0x16, 0xcd, 0x40, 0xf1, 0x84, 0xf4, 0xf9, 0xb0, 0x61, - 0xfa, 0x27, 0x9a, 0x87, 0xd1, 0x53, 0x45, 0xef, 0xf1, 0xf0, 0x3b, 0x8e, 0xf9, 0xc7, 0x07, 0x85, - 0xf7, 0x25, 0xf9, 0x87, 0xa3, 0x61, 0xdf, 0x61, 0x8c, 0xf9, 0x1e, 0xdd, 0xb4, 0x5a, 0xba, 0xa6, - 0x2a, 0x8e, 0x20, 0x42, 0x37, 0xf9, 0x86, 0x95, 0x97, 0x61, 0x5f, 0x1a, 0xe1, 0xd6, 0x85, 0xd7, - 0xcb, 0xad, 0x8b, 0xaf, 0x86, 0x5b, 0xff, 0x26, 0x94, 0x1d, 0x8f, 0x55, 0x8f, 0x30, 0xc8, 0x07, - 0x43, 0xc4, 0x57, 0x41, 0xa8, 0xfd, 0x0a, 0x7c, 0x2a, 0xed, 0x83, 0xa6, 0x91, 0xe8, 0xd1, 0x21, - 0x49, 0xf4, 0x2b, 0x25, 0xbe, 0x34, 0xde, 0x58, 0x4a, 0xcf, 0x21, 0x6d, 0x16, 0xdb, 0xca, 0x41, - 0xbc, 0x69, 0xb2, 0x52, 0x2c, 0xa4, 0xe8, 0x93, 0x88, 0xcb, 0x96, 0xaf, 0xe2, 0xb2, 0x53, 0xd9, - 0xee, 0x8a, 0x0e, 0x60, 0xd1, 0xb2, 0xcd, 0x8e, 0x4d, 0x1c, 0x67, 0x9d, 0x28, 0x6d, 0x5d, 0x33, - 0x88, 0x37, 0x3e, 0x9c, 0x11, 0xdd, 0xbe, 0x38, 0xaf, 0x2e, 0x36, 0xd3, 0x55, 0x70, 0x96, 0xad, - 0xfc, 0x62, 0x04, 0x66, 0xe2, 0x19, 0x30, 0x83, 0xa4, 0x4a, 0x57, 0x22, 0xa9, 0xf7, 0x43, 0x8b, - 0x81, 0x33, 0xf8, 0xd0, 0x09, 0x4e, 0x62, 0x41, 0xac, 0xc2, 0xb4, 0x88, 0x06, 0x9e, 0x50, 0xd0, - 0x74, 0x7f, 0xf6, 0x0f, 0xa2, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x60, 0xd2, 0x66, 0xbc, 0xdb, 0x03, - 0xe0, 0xdc, 0xf5, 0x96, 0x00, 0x98, 0xc4, 0x61, 0x21, 0x8e, 0xea, 0x52, 0xde, 0x1a, 0xd0, 0x51, - 0x0f, 0x60, 0x24, 0xca, 0x5b, 0x57, 0xe3, 0x0a, 0x38, 0x69, 0x83, 0x76, 0x60, 0xae, 0x67, 0x24, - 0xa1, 0xb8, 0x2b, 0xdf, 0x16, 0x50, 0x73, 0x07, 0x49, 0x15, 0x9c, 0x66, 0x87, 0x8e, 0x22, 0x54, - 0x76, 0x8c, 0x85, 0xe7, 0x95, 0xdc, 0x0b, 0x2f, 0x37, 0x97, 0x4d, 0xa1, 0xdb, 0xe5, 0xbc, 0x74, - 0x5b, 0xfe, 0x7b, 0x29, 0x9c, 0x84, 0x7c, 0x0a, 0x3c, 0xe8, 0x94, 0x29, 0x61, 0x11, 0x62, 0x47, - 0x66, 0x3a, 0xfb, 0x7d, 0x3c, 0x14, 0xfb, 0x0d, 0x92, 0xe7, 0x60, 0xfa, 0xfb, 0x99, 0x04, 0x0b, - 0x9b, 0xad, 0xa7, 0xb6, 0xd9, 0xb3, 0xbc, 0xe6, 0xec, 0x59, 0x7c, 0x68, 0xbe, 0x0c, 0x23, 0x76, - 0x4f, 0xf7, 0xfa, 0xf1, 0xa6, 0xd7, 0x0f, 0xdc, 0xd3, 0x69, 0x3f, 0xe6, 0x62, 0x56, 0xbc, 0x13, - 0xd4, 0x00, 0xed, 0xc2, 0x98, 0xad, 0x18, 0x1d, 0xe2, 0xa5, 0xd5, 0xb7, 0x06, 0xb4, 0x7e, 0x6b, - 0x1d, 0x53, 0xf5, 0x10, 0xb1, 0x61, 0xd6, 0x58, 0xa0, 0xc8, 0xff, 0x20, 0xc1, 0xf4, 0xb3, 0xfd, - 0xfd, 0xe6, 0x96, 0xc1, 0x56, 0x34, 0x3b, 0x5b, 0xbd, 0x0b, 0x23, 0x96, 0xe2, 0x1e, 0xc7, 0x33, - 0x3d, 0x95, 0x61, 0x26, 0x41, 0x0f, 0xa1, 0x4c, 0xff, 0xa5, 0xed, 0x62, 0x4b, 0x6a, 0x9c, 0x05, - 0xc2, 0x72, 0x53, 0x94, 0xbd, 0x0c, 0xfd, 0x8d, 0x7d, 0x4d, 0xf4, 0x2d, 0x28, 0xd1, 0xf8, 0x43, - 0x8c, 0x76, 0x4e, 0x82, 0x2e, 0x1a, 0xd5, 0xe0, 0x46, 0x01, 0xe7, 0x12, 0x05, 0xd8, 
0x83, 0x93, - 0x4f, 0x60, 0x3e, 0xd4, 0x09, 0x3a, 0x8a, 0xcf, 0x69, 0x4e, 0x45, 0x2d, 0x18, 0xa5, 0xb5, 0xd3, - 0xcc, 0x59, 0xcc, 0x71, 0x04, 0x1a, 0x1b, 0x88, 0x80, 0x1f, 0xd1, 0x2f, 0x07, 0x73, 0x2c, 0x79, - 0x07, 0x26, 0xd9, 0x31, 0xb4, 0x69, 0xbb, 0x6c, 0x30, 0xd1, 0x1d, 0x28, 0x76, 0x35, 0x43, 0x64, - 0xe7, 0x09, 0x61, 0x53, 0xa4, 0x99, 0x85, 0x96, 0x33, 0xb1, 0x72, 0x26, 0xe2, 0x55, 0x20, 0x56, - 0xce, 0x30, 0x2d, 0x97, 0x9f, 0x42, 0x49, 0x4c, 0x52, 0x18, 0xa8, 0x78, 0x39, 0x50, 0x31, 0x05, - 0x68, 0x0f, 0x4a, 0x5b, 0xcd, 0x86, 0x6e, 0x72, 0xae, 0xa6, 0x6a, 0x6d, 0x3b, 0x3e, 0x83, 0x6b, - 0x5b, 0xeb, 0x18, 0x33, 0x09, 0x92, 0x61, 0x8c, 0x9c, 0xa9, 0xc4, 0x72, 0x99, 0x1f, 0x8d, 0x37, - 0x80, 0xfa, 0xc6, 0x06, 0x2b, 0xc1, 0x42, 0x22, 0xff, 0x51, 0x01, 0x4a, 0x62, 0x38, 0xae, 0x61, - 0xef, 0xb6, 0x1d, 0xd9, 0xbb, 0xbd, 0x93, 0xcf, 0x35, 0x32, 0x37, 0x6e, 0xfb, 0xb1, 0x8d, 0xdb, - 0xfd, 0x9c, 0x78, 0x97, 0xef, 0xda, 0xbe, 0x57, 0x80, 0xa9, 0xa8, 0x53, 0xa2, 0x47, 0x30, 0x41, - 0xd3, 0x94, 0xa6, 0x92, 0xdd, 0x80, 0x1d, 0xfb, 0x47, 0x37, 0xad, 0x40, 0x84, 0xc3, 0x7a, 0xa8, - 0xe3, 0x9b, 0x51, 0x3f, 0x12, 0x9d, 0xce, 0x1e, 0xd2, 0x9e, 0xab, 0xe9, 0x35, 0x7e, 0x21, 0x53, - 0xdb, 0x32, 0xdc, 0x3d, 0xbb, 0xe5, 0xda, 0x9a, 0xd1, 0x49, 0x54, 0xc4, 0x9c, 0x32, 0x8c, 0x8c, - 0xbe, 0x49, 0x53, 0xa6, 0x63, 0xf6, 0x6c, 0x95, 0xa4, 0x51, 0x5f, 0x8f, 0xb6, 0xd1, 0x05, 0xda, - 0xde, 0x36, 0x55, 0x45, 0xe7, 0x93, 0x83, 0xc9, 0x11, 0xb1, 0x89, 0xa1, 0x12, 0x8f, 0x6e, 0x72, - 0x08, 0xec, 0x83, 0xc9, 0x7f, 0x23, 0xc1, 0x84, 0x18, 0x8b, 0x6b, 0xd8, 0xe4, 0x7c, 0x23, 0xba, - 0xc9, 0x79, 0x2b, 0x67, 0xe4, 0x48, 0xdf, 0xe1, 0xfc, 0xad, 0x04, 0x4b, 0x5e, 0xd3, 0x4d, 0xa5, - 0xdd, 0x50, 0x74, 0xc5, 0x50, 0x89, 0xed, 0xf9, 0xfa, 0x12, 0x14, 0x34, 0x4b, 0xcc, 0x24, 0x08, - 0x80, 0xc2, 0x56, 0x13, 0x17, 0x34, 0x8b, 0x32, 0x90, 0x63, 0xd3, 0x71, 0xd9, 0x4e, 0x88, 0x6f, - 0xb2, 0xfd, 0x56, 0x3f, 0x13, 0xe5, 0xd8, 0xd7, 0x40, 0x07, 0x30, 0x6a, 0x99, 0xb6, 0x4b, 0xb3, - 0x7e, 0x31, 0x36, 0xbf, 0x97, 0xb4, 0x9a, 0xce, 0x9b, 0x70, 0xc4, 0x20, 0x02, 0x51, 0x18, 0xcc, - 0xd1, 0xe4, 0xdf, 0x93, 0xe0, 0x8d, 0x94, 0xf6, 0x0b, 0xc2, 0xd5, 0x86, 0x92, 0xc6, 0x85, 0x22, - 0xec, 0x7d, 0x25, 0x5f, 0xb5, 0x29, 0x43, 0x11, 0x84, 0x5c, 0x2f, 0xb4, 0x7a, 0xd0, 0xf2, 0x0f, - 0x24, 0x98, 0x4d, 0xb4, 0x97, 0xa5, 0x0e, 0xea, 0xcf, 0x62, 0xa7, 0xe2, 0xa7, 0x0e, 0xea, 0x96, - 0x4c, 0x82, 0xbe, 0x01, 0x65, 0x76, 0x8f, 0xa8, 0x9a, 0xba, 0x18, 0xc0, 0xba, 0x37, 0x80, 0x4d, - 0x51, 0xfe, 0xf2, 0xbc, 0x7a, 0x3b, 0xe5, 0x9c, 0xc2, 0x13, 0x63, 0x1f, 0x00, 0x55, 0x61, 0x94, - 0xd8, 0xb6, 0x69, 0x8b, 0x24, 0x34, 0x4e, 0x47, 0x6a, 0x83, 0x16, 0x60, 0x5e, 0x2e, 0xff, 0x45, - 0xe0, 0xa4, 0x34, 0x2b, 0xd0, 0xf6, 0xd1, 0xc9, 0x89, 0x07, 0x46, 0x3a, 0x75, 0x98, 0x49, 0x50, - 0x0f, 0x66, 0xb4, 0x58, 0x1a, 0x11, 0xab, 0xb3, 0x9e, 0x6f, 0x18, 0x7d, 0xb3, 0x46, 0x45, 0xc0, - 0xcf, 0xc4, 0x25, 0x38, 0x51, 0x85, 0x4c, 0x20, 0xa1, 0x85, 0x3e, 0x82, 0x91, 0x63, 0xd7, 0xb5, - 0x52, 0x2e, 0x4a, 0x06, 0x24, 0xaf, 0xa0, 0x09, 0x65, 0xd6, 0xbb, 0xfd, 0xfd, 0x26, 0x66, 0x50, - 0xf2, 0xdf, 0x15, 0xfc, 0xf1, 0x60, 0xbb, 0xcb, 0x0f, 0xfd, 0xde, 0xae, 0xe9, 0x8a, 0xe3, 0xb0, - 0x10, 0xc6, 0x4f, 0x42, 0xe6, 0x43, 0x0d, 0xf7, 0x65, 0x38, 0xa1, 0x8d, 0xf6, 0x83, 0xa4, 0x2e, - 0x5d, 0x25, 0xa9, 0x4f, 0xa4, 0x25, 0x74, 0xf4, 0x0c, 0x8a, 0xae, 0x9e, 0xf7, 0x44, 0x43, 0x20, - 0xee, 0x6f, 0xb7, 0x82, 0xac, 0xb8, 0xbf, 0xdd, 0xc2, 0x14, 0x02, 0xed, 0xc1, 0x28, 0x25, 0x4e, - 0x34, 0x0f, 0x14, 0xf3, 0xe7, 0x15, 0x3a, 0x82, 0xc1, 0xe2, 0xa3, 0x5f, 0x0e, 0xe6, 0x38, 0xf2, - 0xef, 0x4b, 
0x30, 0x19, 0xc9, 0x16, 0xc8, 0x86, 0x9b, 0x7a, 0x68, 0xed, 0x88, 0x71, 0x78, 0x7f, - 0xf8, 0x55, 0x27, 0x16, 0xfd, 0xbc, 0xa8, 0xf7, 0x66, 0x58, 0x86, 0x23, 0x75, 0xc8, 0x0a, 0x40, - 0xd0, 0x6d, 0xba, 0x0e, 0xa8, 0xf3, 0xf2, 0x05, 0x2f, 0xd6, 0x01, 0xf5, 0x69, 0x07, 0xf3, 0x72, - 0xb4, 0x02, 0xe0, 0x10, 0xd5, 0x26, 0xee, 0x6e, 0x10, 0xb8, 0xfc, 0x74, 0xdc, 0xf2, 0x25, 0x38, - 0xa4, 0x25, 0x7f, 0x56, 0x80, 0xc9, 0x5d, 0xe2, 0x7e, 0xd7, 0xb4, 0x4f, 0x9a, 0xa6, 0xae, 0xa9, - 0xfd, 0x6b, 0x20, 0x01, 0x38, 0x42, 0x02, 0x06, 0xc5, 0xcb, 0x48, 0xeb, 0x32, 0xa9, 0xc0, 0xc7, - 0x31, 0x2a, 0xb0, 0x32, 0x14, 0xea, 0xe5, 0x84, 0xe0, 0x47, 0x12, 0x2c, 0x46, 0xf4, 0x37, 0x82, - 0x58, 0xe3, 0x07, 0x7f, 0x29, 0x57, 0xf0, 0x8f, 0xc0, 0xd0, 0x80, 0x99, 0x1e, 0xfc, 0xd1, 0x36, - 0x14, 0x5c, 0x53, 0xac, 0x8c, 0xe1, 0x30, 0x09, 0xb1, 0x83, 0x7c, 0xb6, 0x6f, 0xe2, 0x82, 0x6b, - 0xca, 0xff, 0x28, 0x41, 0x25, 0xa2, 0x15, 0x8e, 0x96, 0xaf, 0xa9, 0x07, 0x18, 0x46, 0x8e, 0x6c, - 0xb3, 0x7b, 0xe5, 0x3e, 0xf8, 0x93, 0xbc, 0x69, 0x9b, 0x5d, 0xcc, 0xb0, 0xe4, 0x1f, 0x4b, 0x30, - 0x1b, 0xd1, 0xbc, 0x06, 0x4e, 0xf2, 0x51, 0x94, 0x93, 0xdc, 0x1f, 0xa6, 0x23, 0x19, 0xcc, 0xe4, - 0xc7, 0x85, 0x58, 0x37, 0x68, 0x87, 0xd1, 0x11, 0x4c, 0x58, 0x66, 0xbb, 0xf5, 0x0a, 0x2e, 0xce, - 0xa7, 0x29, 0x57, 0x6c, 0x06, 0x58, 0x38, 0x0c, 0x8c, 0xce, 0x60, 0x96, 0xd2, 0x16, 0xc7, 0x52, - 0x54, 0xd2, 0x7a, 0x05, 0x47, 0x89, 0xb7, 0xd8, 0xcd, 0x5c, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0x3b, - 0x50, 0xd2, 0x2c, 0xb6, 0x77, 0x11, 0x8b, 0x74, 0x20, 0xc1, 0xe3, 0x3b, 0x1d, 0x9e, 0x3e, 0xc4, - 0x07, 0xf6, 0x30, 0xe4, 0x7f, 0x8d, 0x7b, 0x03, 0xa3, 0xc2, 0x4f, 0x43, 0xd4, 0x43, 0xdc, 0xa1, - 0x5d, 0x8d, 0x76, 0xec, 0x0a, 0x96, 0x73, 0x55, 0xd6, 0x5e, 0x8e, 0x71, 0xa2, 0x5f, 0x81, 0x12, - 0x31, 0xda, 0x6c, 0x23, 0xc0, 0x0f, 0xa8, 0x58, 0xaf, 0x36, 0x78, 0x11, 0xf6, 0x64, 0xf2, 0x1f, - 0x14, 0x63, 0xbd, 0x62, 0x29, 0xfc, 0xd3, 0x57, 0xe6, 0x1c, 0xfe, 0x66, 0x22, 0xd3, 0x41, 0x0e, - 0x03, 0x6a, 0xc9, 0x7d, 0xfe, 0xcb, 0xc3, 0xf8, 0x7c, 0x38, 0xb7, 0x66, 0x12, 0x4b, 0xf4, 0x6d, - 0x18, 0x23, 0xbc, 0x0a, 0x9e, 0xb1, 0x1f, 0x0f, 0x53, 0x45, 0x10, 0x7e, 0x83, 0x90, 0x2d, 0xca, - 0x04, 0x2a, 0xfa, 0x1a, 0x1d, 0x2f, 0xaa, 0x4b, 0xb7, 0x3c, 0x9c, 0x99, 0x8f, 0x37, 0xee, 0xf0, - 0x6e, 0xfb, 0xc5, 0x2f, 0xcf, 0xab, 0x10, 0x7c, 0xe2, 0xb0, 0x85, 0xfc, 0xdb, 0x30, 0x97, 0x92, - 0x22, 0x90, 0x1a, 0x39, 0x55, 0xe3, 0x11, 0xb3, 0x9e, 0x6f, 0x1a, 0xf2, 0x5f, 0x0f, 0xff, 0xb3, - 0x04, 0xb3, 0x6c, 0x76, 0xd4, 0x9e, 0xad, 0xb9, 0xfd, 0x6b, 0xcb, 0xcb, 0xcf, 0x23, 0x79, 0xf9, - 0xe1, 0x80, 0x29, 0x49, 0xb4, 0x30, 0x2b, 0x37, 0xcb, 0x3f, 0x91, 0xe0, 0x56, 0x42, 0xfb, 0x1a, - 0x42, 0xf7, 0x41, 0x34, 0x74, 0x7f, 0x69, 0xd8, 0x0e, 0x65, 0x84, 0xef, 0xff, 0x9a, 0x4d, 0xe9, - 0x0e, 0x5b, 0xa5, 0x2b, 0x00, 0x96, 0xad, 0x9d, 0x6a, 0x3a, 0xe9, 0x88, 0x17, 0x2d, 0xe5, 0xd0, - 0x7b, 0x45, 0x5f, 0x82, 0x43, 0x5a, 0xc8, 0x81, 0x85, 0x36, 0x39, 0x52, 0x7a, 0xba, 0xbb, 0xda, - 0x6e, 0xaf, 0x29, 0x96, 0x72, 0xa8, 0xe9, 0x9a, 0xab, 0x89, 0xb3, 0xbf, 0xf1, 0xc6, 0x13, 0xfe, - 0xd2, 0x24, 0x4d, 0xe3, 0xe5, 0x79, 0xf5, 0x4e, 0xda, 0x55, 0xaf, 0xa7, 0xd2, 0xc7, 0x19, 0xd0, - 0xa8, 0x0f, 0x15, 0x9b, 0xfc, 0x56, 0x4f, 0xb3, 0x49, 0x7b, 0xdd, 0x36, 0xad, 0x48, 0xb5, 0x45, - 0x56, 0xed, 0xaf, 0x5f, 0x9c, 0x57, 0x2b, 0x38, 0x43, 0x67, 0x70, 0xc5, 0x99, 0xf0, 0xe8, 0x53, - 0x98, 0x53, 0xc4, 0xcb, 0xd2, 0x70, 0xad, 0x7c, 0x85, 0xbe, 0x7f, 0x71, 0x5e, 0x9d, 0x5b, 0x4d, - 0x8a, 0x07, 0x57, 0x98, 0x06, 0x8a, 0xea, 0x50, 0x3a, 0x65, 0x8f, 0x50, 0x9d, 0xca, 0x28, 0xc3, - 0xa7, 0xb9, 0xaa, 0xc4, 0xdf, 0xa5, 
0x52, 0xcc, 0xb1, 0xcd, 0x16, 0x5b, 0xf9, 0x9e, 0x16, 0x7a, - 0x04, 0x13, 0x94, 0x4a, 0x8b, 0x95, 0xcf, 0xae, 0x7f, 0xca, 0x41, 0xc4, 0x7c, 0x16, 0x88, 0x70, - 0x58, 0x0f, 0x7d, 0x02, 0xe3, 0xc7, 0xe2, 0xb0, 0xd0, 0xa9, 0x94, 0x72, 0xf1, 0x84, 0xc8, 0xe1, - 0x62, 0x63, 0x56, 0x54, 0x31, 0xee, 0x15, 0x3b, 0x38, 0x40, 0x44, 0x6f, 0x43, 0x89, 0x7d, 0x6c, - 0xad, 0xb3, 0xb3, 0xf5, 0x72, 0x10, 0x57, 0x9f, 0xf1, 0x62, 0xec, 0xc9, 0x3d, 0xd5, 0xad, 0xe6, - 0x1a, 0xbb, 0xe3, 0x89, 0xa9, 0x6e, 0x35, 0xd7, 0xb0, 0x27, 0x47, 0xdf, 0x81, 0x92, 0x43, 0xb6, - 0x35, 0xa3, 0x77, 0x56, 0x81, 0x5c, 0x2f, 0x44, 0x5a, 0x1b, 0x4c, 0x3b, 0x76, 0xca, 0x1d, 0xd4, - 0x20, 0xe4, 0xd8, 0x83, 0x45, 0xc7, 0x30, 0x6e, 0xf7, 0x8c, 0x55, 0xe7, 0xc0, 0x21, 0x76, 0x65, - 0x82, 0xd5, 0x31, 0x28, 0x95, 0x60, 0x4f, 0x3f, 0x5e, 0x8b, 0x3f, 0x42, 0xbe, 0x06, 0x0e, 0xc0, - 0xd1, 0x31, 0x00, 0xfb, 0x60, 0x07, 0xea, 0x95, 0x85, 0x5c, 0x5b, 0x33, 0xec, 0x1b, 0xc4, 0xeb, - 0xe2, 0x97, 0x6a, 0xbe, 0x18, 0x87, 0xb0, 0xd1, 0x1f, 0x4a, 0x80, 0x9c, 0x9e, 0x65, 0xe9, 0xa4, - 0x4b, 0x0c, 0x57, 0xd1, 0x59, 0xa9, 0x53, 0xb9, 0xc9, 0xaa, 0xfc, 0x70, 0xd0, 0x08, 0x26, 0x0c, - 0xe3, 0x55, 0xfb, 0x77, 0x65, 0x49, 0x55, 0x9c, 0x52, 0x2f, 0x9d, 0xc4, 0x23, 0xd1, 0xeb, 0xc9, - 0x5c, 0x93, 0x98, 0x7e, 0x55, 0x11, 0x4c, 0xa2, 0x90, 0x63, 0x0f, 0x16, 0x3d, 0x87, 0x05, 0xef, - 0xb5, 0x34, 0x36, 0x4d, 0x77, 0x53, 0xd3, 0x89, 0xd3, 0x77, 0x5c, 0xd2, 0xad, 0x4c, 0x31, 0x07, - 0xf3, 0x9f, 0x8c, 0xe1, 0x54, 0x2d, 0x9c, 0x61, 0x8d, 0xba, 0x50, 0xf5, 0x82, 0x13, 0x5d, 0xb9, - 0x7e, 0x74, 0xdc, 0x70, 0x54, 0x45, 0xe7, 0xd7, 0x87, 0xd3, 0xac, 0x82, 0x37, 0x2f, 0xce, 0xab, - 0xd5, 0xf5, 0xcb, 0x55, 0xf1, 0x20, 0x2c, 0xf4, 0x2d, 0xa8, 0x28, 0x59, 0xf5, 0xcc, 0xb0, 0x7a, - 0xbe, 0x40, 0x23, 0x5e, 0x66, 0x05, 0x99, 0xd6, 0xc8, 0x85, 0x19, 0x25, 0xfa, 0x6e, 0xdd, 0xa9, - 0xcc, 0xe6, 0xba, 0x89, 0x88, 0x3d, 0x77, 0x0f, 0x8e, 0x92, 0x62, 0x02, 0x07, 0x27, 0x6a, 0x40, - 0xbf, 0x03, 0x48, 0x89, 0x3f, 0xb5, 0x77, 0x2a, 0x28, 0x57, 0xa2, 0x4b, 0xbc, 0xd1, 0x0f, 0xdc, - 0x2e, 0x21, 0x72, 0x70, 0x4a, 0x3d, 0x74, 0x0f, 0xa1, 0xc4, 0x7e, 0x1e, 0xe0, 0x54, 0x16, 0x13, - 0x6c, 0xe8, 0x92, 0xca, 0x7d, 0xbb, 0xd0, 0x2d, 0x69, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0xdb, 0x30, - 0x2f, 0x0a, 0x0f, 0x0c, 0x47, 0x39, 0x22, 0xad, 0xbe, 0xa3, 0xba, 0xba, 0x53, 0x99, 0x63, 0xf1, - 0x9d, 0xdd, 0xd4, 0xaf, 0xa6, 0xc8, 0x71, 0xaa, 0x15, 0xfa, 0x10, 0x66, 0x8e, 0x4c, 0xfb, 0x50, - 0x6b, 0xb7, 0x89, 0xe1, 0x21, 0xcd, 0x33, 0x24, 0x76, 0x32, 0xb6, 0x19, 0x93, 0xe1, 0x84, 0x36, - 0x72, 0xe0, 0x96, 0x40, 0x6e, 0xda, 0xa6, 0xba, 0x63, 0xf6, 0x0c, 0x97, 0x53, 0xce, 0x5b, 0x7e, - 0x1a, 0xbd, 0xb5, 0x9a, 0xa6, 0xf0, 0xf2, 0xbc, 0x7a, 0x37, 0x7d, 0x23, 0x12, 0x28, 0xe1, 0x74, - 0x6c, 0x64, 0xc1, 0x4d, 0xf1, 0xa3, 0x0f, 0x76, 0x44, 0x57, 0xa9, 0xb0, 0xa5, 0xff, 0xc1, 0xe0, - 0x80, 0xe7, 0x9b, 0xc4, 0xd7, 0xff, 0xcc, 0xc5, 0x79, 0xf5, 0x66, 0x58, 0x01, 0x47, 0x6a, 0x60, - 0x8f, 0xfc, 0xc4, 0xd5, 0xf2, 0xf5, 0xfc, 0x50, 0x62, 0xb8, 0x47, 0x7e, 0x41, 0xd3, 0x5e, 0xd9, - 0x23, 0xbf, 0x10, 0xe4, 0xe5, 0xa7, 0x43, 0xff, 0x59, 0x80, 0xb9, 0x40, 0x39, 0xf7, 0x23, 0xbf, - 0x14, 0x93, 0x5f, 0xfe, 0x58, 0x22, 0xdf, 0xc3, 0xbb, 0x60, 0xe8, 0xfe, 0xef, 0x3d, 0xbc, 0x0b, - 0xda, 0x96, 0xb1, 0x7b, 0xf8, 0xab, 0x42, 0xb8, 0x03, 0x43, 0xbe, 0xfe, 0x7a, 0x05, 0xbf, 0x17, - 0xf8, 0x85, 0x7b, 0x40, 0x26, 0xff, 0xa4, 0x08, 0x33, 0xf1, 0xd5, 0x18, 0x79, 0x24, 0x24, 0x0d, - 0x7c, 0x24, 0xd4, 0x84, 0xf9, 0xa3, 0x9e, 0xae, 0xf7, 0x59, 0x1f, 0x42, 0x2f, 0x85, 0xf8, 0x75, - 0xfd, 0x17, 0x84, 0xe5, 0xfc, 0x66, 0x8a, 0x0e, 0x4e, 0xb5, 
0x4c, 0xbe, 0x19, 0x1a, 0xf9, 0xdf, - 0xbe, 0x19, 0x1a, 0xbd, 0xc2, 0x9b, 0xa1, 0xf4, 0x67, 0x57, 0xc5, 0x2b, 0x3d, 0xbb, 0xba, 0xca, - 0x83, 0xa1, 0x94, 0x20, 0x36, 0xf0, 0x74, 0xe3, 0xab, 0x30, 0x15, 0x7d, 0xc4, 0xc6, 0xe7, 0x92, - 0xbf, 0xa3, 0x13, 0xcf, 0x22, 0x42, 0x73, 0xc9, 0xcb, 0xb1, 0xaf, 0x21, 0x5f, 0x48, 0xb0, 0x90, - 0xfe, 0x58, 0x1d, 0xe9, 0x30, 0xd5, 0x55, 0xce, 0xc2, 0x3f, 0x20, 0x90, 0xae, 0x78, 0x78, 0xc7, - 0x5e, 0x2f, 0xed, 0x44, 0xb0, 0x70, 0x0c, 0x1b, 0x7d, 0x0c, 0xe5, 0xae, 0x72, 0xd6, 0xea, 0xd9, - 0x1d, 0x72, 0xe5, 0x43, 0x42, 0xb6, 0x8c, 0x76, 0x04, 0x0a, 0xf6, 0xf1, 0xe4, 0x9f, 0x4b, 0xb0, - 0x98, 0xf1, 0x26, 0xe9, 0xff, 0x51, 0x2f, 0x7f, 0x20, 0xc1, 0x1b, 0x99, 0xdb, 0x30, 0xf4, 0x38, - 0xf2, 0x7c, 0x4a, 0x8e, 0x3d, 0x9f, 0x42, 0x49, 0xc3, 0xd7, 0xf4, 0x7a, 0xea, 0x33, 0x09, 0x2a, - 0x59, 0xfb, 0x52, 0xf4, 0x28, 0xd2, 0xc8, 0x2f, 0xc6, 0x1a, 0x39, 0x9b, 0xb0, 0x7b, 0x4d, 0x6d, - 0xfc, 0x17, 0x09, 0x6e, 0x5f, 0xc2, 0xef, 0xfc, 0xed, 0x0f, 0x69, 0x87, 0xb5, 0xd8, 0xa9, 0xbd, - 0xb8, 0x4e, 0x0c, 0xb6, 0x3f, 0x29, 0x3a, 0x38, 0xd3, 0x1a, 0x1d, 0xc0, 0xa2, 0xd8, 0x7b, 0xc5, - 0x65, 0x82, 0xba, 0xb0, 0x57, 0xa6, 0xeb, 0xe9, 0x2a, 0x38, 0xcb, 0x56, 0xfe, 0x4b, 0x09, 0x16, - 0xd2, 0x0f, 0x1c, 0xd0, 0x7b, 0x91, 0x21, 0xaf, 0xc6, 0x86, 0x7c, 0x3a, 0x66, 0x25, 0x06, 0xfc, - 0xdb, 0x30, 0x25, 0x8e, 0x25, 0x04, 0x8c, 0x70, 0x66, 0x39, 0x2d, 0x3b, 0x09, 0x08, 0x8f, 0x1c, - 0xb3, 0x65, 0x12, 0x2d, 0xc3, 0x31, 0x34, 0xf9, 0x7b, 0x05, 0x18, 0x6d, 0xa9, 0x8a, 0x4e, 0xae, - 0x81, 0x1b, 0x7f, 0x3d, 0xc2, 0x8d, 0x07, 0xfd, 0x7e, 0x93, 0xb5, 0x2a, 0x93, 0x16, 0xe3, 0x18, - 0x2d, 0x7e, 0x27, 0x17, 0xda, 0xe5, 0x8c, 0xf8, 0x2b, 0x30, 0xee, 0x57, 0x3a, 0x5c, 0xa2, 0x96, - 0xff, 0xbc, 0x00, 0x13, 0xa1, 0x2a, 0x86, 0x4c, 0xf3, 0x47, 0x11, 0x6e, 0x53, 0xcc, 0x71, 0x08, - 0x14, 0xaa, 0xab, 0xe6, 0xb1, 0x19, 0xfe, 0xfb, 0x83, 0xe0, 0xc5, 0x79, 0x92, 0xe4, 0x7c, 0x15, - 0xa6, 0x5c, 0xc5, 0xee, 0x10, 0xd7, 0xbf, 0x90, 0xe1, 0x4f, 0x53, 0xfc, 0x1f, 0xc2, 0xec, 0x47, - 0xa4, 0x38, 0xa6, 0xbd, 0xf4, 0x04, 0x26, 0x23, 0x95, 0x0d, 0xf5, 0xf3, 0x81, 0xbf, 0x96, 0xe0, - 0x8b, 0x03, 0x0f, 0x92, 0x50, 0x23, 0xb2, 0x48, 0x6a, 0xb1, 0x45, 0xb2, 0x9c, 0x0d, 0xf0, 0xfa, - 0x9e, 0xa1, 0x36, 0xd6, 0x5e, 0x7c, 0xbe, 0x7c, 0xe3, 0xa7, 0x9f, 0x2f, 0xdf, 0xf8, 0xd9, 0xe7, - 0xcb, 0x37, 0x7e, 0xf7, 0x62, 0x59, 0x7a, 0x71, 0xb1, 0x2c, 0xfd, 0xf4, 0x62, 0x59, 0xfa, 0xd9, - 0xc5, 0xb2, 0xf4, 0xef, 0x17, 0xcb, 0xd2, 0x1f, 0xff, 0x7c, 0xf9, 0xc6, 0xc7, 0x77, 0x2e, 0xfd, - 0xff, 0x1e, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x08, 0x20, 0x7f, 0x0a, 0x28, 0x42, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + // 2890 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, + 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26, + 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, + 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57, + 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42, + 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c, + 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3, + 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae, + 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef, + 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73, + 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2, + 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b, + 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 0xeb, 0x91, 0xfa, + 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83, + 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8, + 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2, + 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79, + 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05, + 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f, + 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50, + 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84, + 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6, + 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a, + 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 
0xcc, 0x71, 0xf1, + 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, + 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07, + 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, + 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, + 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a, + 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83, + 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, + 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, + 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd, + 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, + 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, + 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59, + 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf, + 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50, + 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, + 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, + 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, + 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, + 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, + 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37, + 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, + 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, + 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71, + 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, + 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, + 0x79, 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, + 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, + 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2, + 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, + 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, + 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, + 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, + 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c, + 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, + 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, + 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, + 0xc8, 
0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5, + 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, + 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, + 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6, + 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae, + 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c, + 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf, + 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, + 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8, + 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61, + 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73, + 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9, + 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, + 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, + 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, + 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, + 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1, + 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33, + 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb, + 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa, + 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69, + 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1, + 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90, + 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1, + 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2, + 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54, + 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82, + 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f, + 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38, + 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69, + 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad, + 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc, + 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7, + 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, + 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, + 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d, + 0x55, 0xf6, 0x97, 0x39, 0xce, 
0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc, + 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35, + 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d, + 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13, + 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb, + 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30, + 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96, + 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2, + 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde, + 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0, + 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0, + 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, + 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, + 0x99, 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, + 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83, + 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96, + 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, + 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, + 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, + 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, + 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b, + 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, + 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, + 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b, + 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, + 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, + 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, + 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, + 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, + 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, + 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab, + 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, + 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c, + 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, + 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, + 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, + 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 
0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, + 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, + 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, + 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8, + 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, + 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21, + 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31, + 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10, + 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b, + 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e, + 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39, + 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61, + 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60, + 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc, + 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9, + 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06, + 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4, + 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1, + 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e, + 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4, + 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e, + 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65, + 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e, + 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66, + 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55, + 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 0xfc, 0x8f, 0x12, + 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1, + 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1, + 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e, + 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c, + 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f, + 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f, + 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e, + 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed, + 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71, + 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 
0x15, 0x9e, 0xfe, + 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8, + 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81, + 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76, + 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78, + 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f, + 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31, + 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff, + 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49, + 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49, + 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2882,48 +2320,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3006,7 +2402,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { +func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3016,26 +2412,34 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.CIDR))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IDRange) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3045,100 +2449,34 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IPBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 { @@ -4001,7 +3339,7 @@ func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4011,16 +3349,26 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -4044,7 +3392,60 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4054,12 +3455,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4091,7 +3492,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4101,49 +3502,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsGroup != nil { + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4151,63 +3535,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + dAtA[i] = 0x12 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4215,167 +3576,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x32 } - i-- - dAtA[i] = 0x78 } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - 
} else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4385,12 +3727,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4428,7 +3770,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4438,50 +3780,23 
@@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4491,687 +3806,610 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
- if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + 
sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n 
+= 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() 
- return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *IngressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedHostPath) Size() (n int) 
{ +func (m *IngressRuleValue) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSet) Size() (n int) { +func (m *IngressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *IngressStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { if m == nil { return 0 } @@ -5188,70 +4426,92 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *NetworkPolicyStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *Deployment) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -5266,7 +4526,7 @@ func (m *Deployment) Size() (n int) { return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -5276,18 +4536,16 @@ func (m *DeploymentCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -5304,28 +4562,7 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil 
{ return 0 } @@ -5340,151 +4577,75 @@ func (m *DeploymentSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } return n } -func (m *DeploymentStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } return n } -func (m *DeploymentStrategy) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) return n } -func (m *IDRange) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MaxSurge != nil { + l = 
m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } @@ -5499,4006 +4660,729 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *IngressList) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *IngressLoadBalancerIngress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *IngressLoadBalancerStatus) Size() (n int) { - if m == nil { - return 0 +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *IngressPortStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - if m.Error != nil { - l = len(*m.Error) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range 
m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - return n + mapStringForUpdatedAnnotations += "}" + s := 
strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 +func (this *Ingress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *IngressList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems 
+ `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," } - return n + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n 
+= 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - n += 3 + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", 
"NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems 
[... vendored extensions/v1beta1 generated.pb.go: mechanically regenerated gogo-protobuf helpers (Size, String, valueToStringGenerated, Unmarshal); the PodSecurityPolicy-related helpers are dropped and the NetworkPolicy and ReplicaSet String() helpers are re-emitted at their new positions; the generated-code churn is elided here ...]
{ - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - 
case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) 
String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - return nil + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9521,55 +5405,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - m.UpdatedReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9579,52 +5425,28 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 6: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9651,16 +5473,15 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.ReadyReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9670,31 +5491,25 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.CollisionCount = &v + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9716,7 +5531,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9739,10 +5554,10 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9775,11 +5590,43 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9806,13 +5653,74 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9834,7 +5742,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9857,17 +5765,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9877,27 +5785,28 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9924,8 +5833,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9950,7 +5859,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9973,17 +5882,17 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9993,27 +5902,31 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10040,96 +5953,13 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10156,66 +5986,34 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - m.Min = 0 + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10225,16 +6023,16 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - m.Max = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10244,11 +6042,12 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10270,7 +6069,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10293,17 +6092,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) } - m.Min = 0 + m.CurrentNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10313,16 +6112,16 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.Max = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10332,66 +6131,73 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - var stringLen uint64 + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10401,29 +6207,74 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10433,23 +6284,25 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10472,7 +6325,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10495,17 +6348,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10515,28 +6368,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10563,40 +6415,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10621,7 +6443,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10644,17 +6466,17 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10664,27 +6486,28 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10711,13 +6534,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10744,10 +6567,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10772,7 +6592,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10795,17 +6615,17 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10815,30 +6635,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10848,79 +6667,27 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} 
-func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10948,11 +6715,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10980,11 +6747,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11001,74 +6768,23 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if msglen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
IngressLoadBalancerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11095,8 +6811,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11121,7 +6836,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11144,36 +6859,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11183,29 +6879,30 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11215,24 +6912,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11255,7 +6953,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11278,15 +6976,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11314,11 +7012,11 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11345,63 +7043,107 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11428,10 +7170,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11456,7 +7195,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11479,15 +7218,35 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11514,16 +7273,16 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11550,14 +7309,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11584,16 +7342,74 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowGenerated @@ -11603,25 +7419,48 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11643,7 +7482,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11666,15 +7505,110 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11701,10 +7635,50 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11726,7 +7700,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11749,15 +7723,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11785,13 +7759,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -11801,23 +7775,27 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11840,7 +7818,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11863,17 +7841,17 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11883,28 +7861,27 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11931,15 +7908,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11949,24 +7926,24 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + 
intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11989,7 +7966,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12012,49 +7989,15 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12081,8 +8024,8 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12107,7 +8050,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12130,17 +8073,17 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12150,31 +8093,29 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12184,25 +8125,23 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -12225,7 +8164,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12248,15 +8187,15 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12283,13 +8222,13 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift 
+= 7 { @@ -12316,8 +8255,40 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12342,7 +8313,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12365,17 +8336,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12385,31 +8356,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12436,16 +8403,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12472,10 +8436,10 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12500,7 +8464,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12523,17 +8487,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12543,28 +8507,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12591,33 +8555,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12639,7 +8581,7 @@ func (m *NetworkPolicyPort) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12662,17 +8604,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12682,30 +8624,29 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12715,29 +8656,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12764,16 +8703,66 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + 
} + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12783,23 +8772,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12822,7 +8813,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12845,17 +8836,36 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-12865,25 +8875,56 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12906,7 +8947,7 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12929,17 +8970,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12949,28 +8990,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12997,7 +9037,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13022,7 +9062,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13045,15 +9085,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13080,41 +9120,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13139,7 +9148,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13162,37 +9171,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v 
!= 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13202,29 +9191,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13234,29 +9227,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13266,27 +9261,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { 
@@ -13314,31 +9311,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.HostNetwork = bool(v != 0) - case 7: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13365,16 +9393,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Hosts", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13384,37 +9461,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.HostIPC = bool(v != 0) - case 10: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13424,28 +9493,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 11: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13472,13 +9590,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13505,13 +9623,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13538,75 +9656,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13633,14 +9739,14 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13667,80 +9773,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13750,27 +9842,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13797,16 +9891,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13833,14 +9975,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13867,10 +10008,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13895,7 +10034,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13918,15 +10057,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13953,13 +10092,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13986,13 +10128,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14019,7 +10164,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := 
m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14044,7 +10192,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14067,15 +10215,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14103,13 +10251,120 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14119,27 +10374,28 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14166,15 +10422,16 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14184,27 +10441,29 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14232,7 +10491,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -14255,7 +10514,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-14278,48 +10537,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14346,8 +10572,8 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14372,7 +10598,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14395,17 +10621,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14415,15 +10641,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14450,16 +10689,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14486,29 +10722,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14530,7 +10747,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14553,17 +10770,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14573,35 +10790,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14611,35 +10822,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14649,16 +10854,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14668,81 +10887,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14752,11 +10919,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14778,7 +10958,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14801,15 +10981,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14836,16 +11016,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14872,10 +11049,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14900,7 +11075,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14923,15 +11098,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14958,16 +11153,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14994,13 +11189,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15022,7 +11233,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15045,17 +11256,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15065,27 +11333,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15112,8 +11386,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, 
IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15138,7 +11412,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15161,49 +11435,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15213,26 +11455,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15254,7 +11481,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15277,17 +11504,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) 
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15297,29 +11524,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15329,24 +11560,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15369,7 +11603,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15392,17 +11626,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15412,27 +11646,31 @@ func (m 
*SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15459,10 +11697,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15933,122 +12171,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - 
if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 0509bc3d6795..76fb5a615949 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,37 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/extensions/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. @@ -398,19 +367,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -453,27 +409,6 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. 
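The regenerated Unmarshal functions above all follow the same two decoding patterns: integer fields use a base-128 varint loop, and message or string fields read a varint length prefix and then slice that many bytes. A minimal, standalone sketch of the varint half of that pattern follows; decodeVarint is an illustrative helper, not a function from the generated file.

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the base-128 varint loop that gogo/protobuf emits in the
// regenerated Unmarshal functions above: accumulate 7 bits per byte until a
// byte with the high bit clear terminates the value.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded as a varint
	fmt.Println(v, n, err)                        // 300 2 <nil>
}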
-// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs @@ -875,164 +810,6 @@ message NetworkPolicyStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. 
- // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. 
- // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -1227,57 +1004,6 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. 
-message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -1320,16 +1046,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0bc46f..d58908edc042 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go index be1b95e62c9e..252fd94b9eab 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -1021,389 +1021,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. 
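The register.go hunk above removes PodSecurityPolicy and PodSecurityPolicyList from the kinds registered for extensions/v1beta1. A rough, hedged sketch of what that registration amounts to, using only the kinds that remain; the standalone main and the printed output are for illustration and are not part of the vendored code.

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "extensions", Version: "v1beta1"}
	scheme := runtime.NewScheme()
	// After this vendor bump only the surviving kinds are added for the group/version;
	// PodSecurityPolicy and PodSecurityPolicyList no longer exist in this package.
	scheme.AddKnownTypes(gv,
		&extensionsv1beta1.ReplicaSet{},
		&extensionsv1beta1.ReplicaSetList{},
		&extensionsv1beta1.NetworkPolicy{},
		&extensionsv1beta1.NetworkPolicyList{},
	)
	metav1.AddToGroupVersion(scheme, gv)
	fmt.Println(scheme.KnownTypes(gv)) // lists the kinds registered for extensions/v1beta1
}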
-type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
- SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. 
- // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
-// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. 
- // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 302eb95382f6..fecc838152e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,26 +212,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. 
Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", @@ -476,58 +418,6 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { return map_NetworkPolicyStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -617,46 +507,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -688,14 +538,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 671aa2d9dc8b..b6e92729928c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -1062,161 +961,6 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, 
&out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1413,95 +1157,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. 
-func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1568,24 +1223,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. -func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 963aaffba35a..5c93542282ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { return 1, 16 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
-func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index ac6f7179a0d3..c95999fa5e07 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index fe4f8022a6bd..fc08e128db3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_FlowDistinguisherMethod = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 4bedcce3ed13..b2eff7f96e7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go index e2bd27e8c5d4..728252c0cf2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta3 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto index 8196a14b96bf..ed194a89d564 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,7 +67,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. 
+ // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } @@ -76,13 +76,13 @@ message HTTPIngressRuleValue { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. message IPBlock { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional repeated string except = 2; } @@ -97,12 +97,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,12 +110,12 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -134,7 +134,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -146,31 +146,31 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. 
+ // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +179,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -201,21 +201,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -223,21 +223,21 @@ message IngressLoadBalancerIngress { // IngressLoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. 
// The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -256,7 +256,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -269,14 +269,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -301,18 +301,18 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -325,24 +325,24 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. 
// If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -350,14 +350,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -365,11 +365,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -381,11 +381,11 @@ message NetworkPolicy { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NetworkPolicyStatus status = 3; @@ -395,7 +395,7 @@ message NetworkPolicy { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. 
+ // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -403,7 +403,7 @@ message NetworkPolicyEgressRule { // +optional repeated NetworkPolicyPort ports = 1; - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -415,15 +415,15 @@ message NetworkPolicyEgressRule { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional repeated NetworkPolicyPort ports = 1; - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -439,32 +439,32 @@ message NetworkPolicyList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated NetworkPolicy items = 2; } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. 
+ // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional optional IPBlock ipBlock = 3; @@ -472,19 +472,19 @@ message NetworkPolicyPeer { // NetworkPolicyPort describes a port to allow traffic on message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional optional string protocol = 1; - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -494,16 +494,16 @@ message NetworkPolicyPort { // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. 
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -512,8 +512,8 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyIngressRule ingress = 2; - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -522,23 +522,23 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyEgressRule egress = 3; - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -550,12 +550,12 @@ message NetworkPolicyStatus { // ServiceBackendPort is the service port being referenced. message ServiceBackendPort { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional optional string name = 1; - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". 
// +optional optional int32 number = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go index a9deb900a005..fa7cf1bd700f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go @@ -28,16 +28,17 @@ import ( // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -57,16 +58,16 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -75,8 +76,8 @@ type NetworkPolicySpec struct { // +optional Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. 
Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -85,15 +86,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` @@ -102,15 +103,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -123,7 +124,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. 
+ // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -131,7 +132,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -142,19 +143,19 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -166,12 +167,13 @@ type NetworkPolicyPort struct { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block + + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -179,25 +181,25 @@ type IPBlock struct { // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. 
// - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` @@ -233,9 +235,9 @@ const ( NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" ) -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -250,12 +252,13 @@ type NetworkPolicyStatus struct { // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -268,17 +271,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. 
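As a point of reference for the reworded NetworkPolicy comments above (podSelector, policyTypes, ingress, from, ports, namespaceSelector), here is a minimal sketch of how these networking/v1 fields are typically populated in Go. It is illustrative only and not part of the vendored diff; the namespace "demo", the "app: web" and "team: monitoring" labels, and port 8080 are assumptions for the example.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	tcp := corev1.ProtocolTCP
	port := intstr.FromInt(8080)

	policy := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-monitoring", Namespace: "demo"},
		Spec: networkingv1.NetworkPolicySpec{
			// podSelector: selects the pods this policy applies to; an empty
			// selector would match every pod in the namespace.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "web"},
			},
			// policyTypes: stated explicitly so the policy's scope does not
			// depend on which rule sections happen to be present.
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				// from: namespaceSelector and podSelector combine as described
				// in the NetworkPolicyPeer comments above.
				From: []networkingv1.NetworkPolicyPeer{{
					NamespaceSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"team": "monitoring"},
					},
				}},
				// ports: only TCP/8080 is opened by this rule.
				Ports: []networkingv1.NetworkPolicyPort{{
					Protocol: &tcp,
					Port:     &port,
				}},
			}},
		},
	}

	out, _ := json.MarshalIndent(policy, "", "  ")
	fmt.Println(string(out))
}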
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -289,18 +293,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -313,72 +318,73 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. 
The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // IngressLoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -386,14 +392,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -412,7 +418,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. 
type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -425,14 +431,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -460,7 +466,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -499,14 +505,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -522,19 +528,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. 
+ // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -544,23 +550,23 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -577,12 +583,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -590,15 +597,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. 
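For orientation on the Ingress fields whose comments are reworded above (ingressClassName, tls, rules, host, pathType, backend service/port), a small hedged sketch follows. It is not part of the vendored diff; the class name "nginx", host "shop.example.com", secret "shop-tls", and service "web" on port 8080 are illustrative assumptions.

package main

import (
	"encoding/json"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	className := "nginx" // assumed IngressClass; replaces the deprecated annotation
	pathType := networkingv1.PathTypePrefix

	ing := networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "demo"},
		Spec: networkingv1.IngressSpec{
			IngressClassName: &className,
			// tls: hosts here should match the names in the referenced secret.
			TLS: []networkingv1.IngressTLS{{
				Hosts:      []string{"shop.example.com"},
				SecretName: "shop-tls", // secret assumed to exist
			}},
			Rules: []networkingv1.IngressRule{{
				Host: "shop.example.com",
				IngressRuleValue: networkingv1.IngressRuleValue{
					HTTP: &networkingv1.HTTPIngressRuleValue{
						Paths: []networkingv1.HTTPIngressPath{{
							Path:     "/",
							PathType: &pathType,
							// backend: service and resource are mutually exclusive.
							Backend: networkingv1.IngressBackend{
								Service: &networkingv1.IngressServiceBackend{
									Name: "web",
									Port: networkingv1.ServiceBackendPort{Number: 8080},
								},
							},
						}},
					},
				},
			}},
		},
	}

	out, _ := json.MarshalIndent(ing, "", "  ")
	fmt.Println(string(out))
}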
This is optional if the controller does // not require extra parameters. // +optional @@ -617,20 +624,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -642,10 +653,11 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 94ccf964b74b..91161d5ca4e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. 
A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -49,8 +49,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_IPBlock = map[string]string{ "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. 
For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,7 +124,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -133,9 +133,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -144,7 +144,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "IngressLoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -153,9 +153,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. 
The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -164,7 +164,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -181,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -191,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. 
If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -203,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -211,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -223,8 +223,8 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", + "status": "status represents the current state of the NetworkPolicy. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -233,8 +233,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -243,8 +243,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -254,7 +254,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -263,9 +263,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -274,9 +274,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -285,10 +285,10 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -296,8 +296,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { } var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. 
Current service state", + "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", } func (NetworkPolicyStatus) SwaggerDoc() map[string]string { @@ -306,8 +306,8 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/generated.proto index bbda585b855e..9dd91199836d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -44,7 +44,7 @@ message ClusterCIDR { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ClusterCIDRSpec spec = 2; @@ -57,19 +57,19 @@ message ClusterCIDRList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. repeated ClusterCIDR items = 2; } // ClusterCIDRSpec defines the desired state of ClusterCIDR. message ClusterCIDRSpec { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -79,14 +79,14 @@ message ClusterCIDRSpec { // +required optional int32 perNodeHostBits = 2; - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional optional string ipv4 = 3; - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. 
// +optional optional string ipv6 = 4; diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types.go index 734e9bf8a874..2a6e13a379d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -37,12 +37,13 @@ import ( // selector matches the Node may be used. type ClusterCIDR struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -50,13 +51,13 @@ type ClusterCIDR struct { // ClusterCIDRSpec defines the desired state of ClusterCIDR. type ClusterCIDRSpec struct { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -66,14 +67,14 @@ type ClusterCIDRSpec struct { // +required PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` @@ -85,11 +86,12 @@ type ClusterCIDRSpec struct { // ClusterCIDRList contains a list of ClusterCIDR. type ClusterCIDRList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. 
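The ClusterCIDRSpec fields above translate into a resource like the following sketch, assuming an invented node label and CIDR; the API is alpha and only takes effect when the MultiCIDRRangeAllocator is enabled. With a /16 block and perNodeHostBits set to 8, each matching node receives a /24 (256 addresses).

package example

import (
	corev1 "k8s.io/api/core/v1"
	netv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleClusterCIDR() *netv1alpha1.ClusterCIDR {
	return &netv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "pool-a"},
		Spec: netv1alpha1.ClusterCIDRSpec{
			// nodeSelector: only nodes carrying this (hypothetical) label use the pool.
			NodeSelector: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "node.example.com/pool",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"pool-a"},
					}},
				}},
			},
			// perNodeHostBits: 8 host bits per node, i.e. a /24 slice of the /16 below.
			PerNodeHostBits: 8,
			// ipv4: at least one of ipv4/ipv6 must be set; ipv6 is omitted here.
			IPv4: "10.100.0.0/16",
		},
	}
}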
Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index e0d4a478611a..34fda00a2175 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ClusterCIDR = map[string]string{ "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ClusterCIDR) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterCIDR) SwaggerDoc() map[string]string { var map_ClusterCIDRList = map[string]string{ "": "ClusterCIDRList contains a list of ClusterCIDR.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of ClusterCIDRs.", + "items": "items is the list of ClusterCIDRs.", } func (ClusterCIDRList) SwaggerDoc() map[string]string { @@ -49,10 +49,10 @@ func (ClusterCIDRList) SwaggerDoc() map[string]string { var map_ClusterCIDRSpec = map[string]string{ "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. 
This field is immutable.", - "ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", } func (ClusterCIDRSpec) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/generated.proto index 78ecf9fae26b..46bb7f66f22f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,7 +68,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. repeated HTTPIngressPath paths = 1; } @@ -82,12 +82,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. 
+ // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,15 +95,15 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -121,7 +121,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -133,30 +133,30 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +165,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. 
This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -187,21 +187,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -209,21 +209,21 @@ message IngressLoadBalancerIngress { // LoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -242,7 +242,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -255,7 +255,7 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). 
// The wildcard character '*' must appear by itself as the first DNS label and @@ -287,7 +287,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -300,44 +300,44 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types.go index 49c82123d0cc..87cc91654b83 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. 
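The IngressSpec, IngressTLS and rule fields documented above fit together as in the sketch below; the class, host, secret and service names are invented, and in current clusters the equivalent networking/v1 types would normally be used instead of v1beta1.

package example

import (
	netv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func exampleIngress() *netv1beta1.Ingress {
	className := "example-nginx"
	prefix := netv1beta1.PathTypePrefix
	return &netv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: netv1beta1.IngressSpec{
			// ingressClassName replaces the deprecated kubernetes.io/ingress.class annotation.
			IngressClassName: &className,
			// backend: catch-all for requests that match no rule.
			Backend: &netv1beta1.IngressBackend{
				ServiceName: "default-http-backend",
				ServicePort: intstr.FromInt(80),
			},
			// tls: terminate TLS for the host below using an (invented) secret.
			TLS: []netv1beta1.IngressTLS{{
				Hosts:      []string{"foo.bar.example"},
				SecretName: "foo-bar-example-tls",
			}},
			// rules: one precise host with a single Prefix path.
			Rules: []netv1beta1.IngressRule{{
				Host: "foo.bar.example",
				IngressRuleValue: netv1beta1.IngressRuleValue{
					HTTP: &netv1beta1.HTTPIngressRuleValue{
						Paths: []netv1beta1.HTTPIngressPath{{
							Path:     "/app",
							PathType: &prefix,
							Backend: netv1beta1.IngressBackend{
								ServiceName: "app",
								ServicePort: intstr.FromInt(8080),
							},
						}},
					},
				},
			}},
		},
	}
}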
type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,22 +84,22 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. 
// +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` @@ -106,13 +108,14 @@ type IngressSpec struct { // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,31 +125,31 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // LoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -154,14 +157,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. 
// The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -180,7 +183,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -193,7 +196,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -204,6 +207,7 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the @@ -234,7 +238,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -273,14 +277,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -297,22 +301,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. 
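The Prefix matching rule spelled out in the pathType comment above can be restated as a small helper; this is only an illustration of the documented element-wise comparison, not the code any ingress controller actually ships.

package example

import "strings"

// prefixPathMatches reports whether requestPath matches rulePath under the
// "Prefix" semantics: comparison is per '/'-separated element, so "/foo/bar"
// matches "/foo/bar" and "/foo/bar/baz" but not "/foo/barbaz".
func prefixPathMatches(rulePath, requestPath string) bool {
	if rulePath == "" || rulePath == "/" {
		return true // a bare "/" prefix matches every request path
	}
	ruleElems := strings.Split(strings.Trim(rulePath, "/"), "/")
	reqElems := strings.Split(strings.Trim(requestPath, "/"), "/")
	if len(ruleElems) > len(reqElems) {
		return false
	}
	for i, elem := range ruleElems {
		if reqElems[i] != elem {
			return false
		}
	}
	return true
}

Exact is plain string equality, and ImplementationSpecific leaves the interpretation to the IngressClass.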
PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -333,12 +337,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -346,15 +351,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -373,19 +378,23 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. 
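The IngressClassSpec and IngressClassParametersReference fields in this hunk combine as in the sketch below; the controller name, parameter kind, API group and namespace are invented. When scope is "Namespace" the namespace field must be set, and for "Cluster" it must be left unset.

package example

import (
	netv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleIngressClass() *netv1beta1.IngressClass {
	apiGroup := "config.example.com"
	scope := "Namespace"
	namespace := "ingress-system"
	return &netv1beta1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-nginx"},
		Spec: netv1beta1.IngressClassSpec{
			// controller: a domain-prefixed path identifying the implementation.
			Controller: "example.com/ingress-controller",
			// parameters: a namespaced custom resource holding extra configuration.
			Parameters: &netv1beta1.IngressClassParametersReference{
				APIGroup:  &apiGroup,
				Kind:      "IngressParameters",
				Name:      "shared-settings",
				Scope:     &scope,
				Namespace: &namespace,
			},
		},
	}
}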
// +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -404,6 +413,6 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 195d535c571f..b2373669fecb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. 
Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -50,8 +50,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +82,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +91,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +104,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,7 +115,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -124,9 +124,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "LoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -144,9 +144,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -155,7 +155,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. 
The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -172,10 +172,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. 
This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -183,8 +183,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -193,8 +193,8 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. 
Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/node/v1/generated.proto index 294be85b6248..0152d5e3aba1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,13 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1/types.go index 984696d98373..b00f58772cdd 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1/types.go @@ -34,11 +34,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. 
The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,13 +51,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -66,7 +67,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -96,11 +97,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index a9eddc60ea32..f5e6b327794d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. 
RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/generated.proto index d46e0ec6aa1a..4673e9261d8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. 
message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -61,7 +61,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,17 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. 
diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types.go index 588c8e4c0a72..bf9e284bf732 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,17 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -76,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -106,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 96413754f050..ccc1b7085392 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/generated.proto index 8ffad6973142..54dbc0995ac4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. 
It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,17 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types.go index b924cb421ad5..74ecca26ad4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,17 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. 
// If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -100,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index fec4398b2e71..086105ecc5fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 582b28c15b88..36c01cf2ca44 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index cebba07f47a9..3e1c7983fc45 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AllowedCSIDriver = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 63aa4ed7b663..370398198bca 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 08578aba92d4..6708f3e58e3f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index db9525832be5..fff1fe40fab1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go index 6836dbfb6e6c..4c2d1b7b23dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_AllocationResult = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/generated.proto index afc090777dec..c1a27e8baa8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -37,7 +37,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,7 +54,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types.go index 0f2989424efa..146bae40d3be 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types.go @@ -34,7 +34,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index ac34c531fb9d..f167e19707b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5c60b7ab4c78..f0878fb16e0a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 7b0df4864600..26ba8ff5dcc1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,7 +52,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index fa25f969c437..557005db64e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 44b49ea24638..43878184d6e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. 
// Defaults to PreemptLowerPriority if unset. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types.go index e315e1b35949..6f88592cf20a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,7 +56,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index cbc140f446bc..f42008eb91f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto index d3c425c0419a..ff52fae7293f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto @@ -46,7 +46,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -79,16 +79,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -110,29 +109,27 @@ message CSIDriverSpec { optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. 
+ // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +146,7 @@ message CSIDriverSpec { // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +156,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -193,7 +191,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -225,6 +223,7 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -234,7 +233,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -314,11 +313,11 @@ message CSINodeSpec { // the scheduler assumes that capacity is insufficient and tries some other // node. message CSIStorageCapacity { - // Standard object's metadata. 
The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -326,7 +325,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -335,7 +334,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -343,7 +342,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -355,7 +354,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -377,7 +376,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -394,36 +393,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. 
// +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -439,17 +438,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -466,11 +465,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -484,7 +483,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -493,7 +492,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. 
// +optional optional string persistentVolumeName = 1; @@ -509,39 +508,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -550,11 +549,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -563,7 +562,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. 
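(Aside, not part of the vendored patch: the renamed field comments above — attacher, source, nodeName, attached, attachmentMetadata, attachError, detachError — map one-to-one onto the Go types in k8s.io/api/storage/v1. Below is a minimal illustrative sketch of how a client might populate and inspect a VolumeAttachment, assuming only the k8s.io/api and k8s.io/apimachinery modules already vendored here; the object, driver, PV and node names are hypothetical.)

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-data-0001" // hypothetical PersistentVolume name

	// The spec carries the attacher (CSI driver name), the volume source and the target node,
	// matching the attacher/source/nodeName comments in the hunk above.
	va := storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-va-example"}, // hypothetical object name
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "example.csi.k8s.io", // hypothetical driver; must match GetPluginName()
			Source: storagev1.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
			NodeName: "node-1", // hypothetical node
		},
	}

	// The status half mirrors the attached/attachmentMetadata/attachError comments above.
	switch {
	case va.Status.Attached:
		fmt.Println("attached, metadata:", va.Status.AttachmentMetadata)
	case va.Status.AttachError != nil:
		fmt.Printf("attach failed at %s: %s\n", va.Status.AttachError.Time, va.Status.AttachError.Message)
	default:
		fmt.Println("attach still pending for PV", pvName)
	}
}

In practice only the spec is set by the requester; the status fields are written by the entity completing the attach or detach operation, i.e. the external-attacher, as the field comments above state.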
diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go index f57099df6dcf..be45de9cf0e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go @@ -33,41 +33,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -81,12 +82,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,11 +124,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -138,25 +140,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -165,7 +168,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -181,26 +184,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -209,11 +212,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -242,7 +245,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -279,16 +282,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -310,29 +312,27 @@ type CSIDriverSpec struct { PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -349,7 +349,7 @@ type CSIDriverSpec struct { // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -359,10 +359,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -382,7 +383,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -393,7 +394,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -453,12 +454,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -502,6 +502,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -520,7 +521,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -557,7 +558,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -609,11 +610,12 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + + // Standard object's metadata. + // The name has no particular meaning. 
It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -621,7 +623,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -630,7 +632,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -638,7 +640,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -650,7 +652,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -670,12 +672,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 1a069bb40378..c92a7f95a29e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. 
This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. 
As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -106,11 +106,11 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", - "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto index a5345122607d..88250a0f013b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -67,7 +67,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -76,7 +76,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -84,7 +84,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -96,7 +96,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -118,7 +118,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -134,11 +134,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -152,7 +152,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -161,7 +161,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. 
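As an illustrative aside (not part of the vendored diff): the CSIStorageCapacity fields documented above can be sketched in Go roughly as follows, expressing something like StorageClass "standard" having "1234 GiB" available in "topology.kubernetes.io/zone=us-east1". The namespace, zone value, and capacity figure are hypothetical; field names follow the v1alpha1 types shown in this patch.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	capacity := resource.MustParse("1234Gi")

	csc := storagev1alpha1.CSIStorageCapacity{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "csisc-",  // the name has no particular meaning; a generated name avoids conflicts
			Namespace:    "default", // hypothetical namespace; CSIStorageCapacity objects are namespaced
		},
		NodeTopology: &metav1.LabelSelector{
			MatchLabels: map[string]string{"topology.kubernetes.io/zone": "us-east1"},
		},
		StorageClassName: "standard",
		Capacity:         &capacity, // leaving capacity unset means the information is currently unavailable
	}

	fmt.Printf("%s: %s available in %v\n", csc.StorageClassName, csc.Capacity.String(), csc.NodeTopology.MatchLabels)
}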
message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -177,39 +177,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -218,11 +218,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types.go index fe8c9e3cd0ae..59ef348a316e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. 
// Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
// +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -174,6 +175,7 @@ type VolumeError struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -186,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -195,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -203,7 +205,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -215,7 +217,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -238,12 +240,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. 
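As an illustrative aside (not part of the vendored diff): a minimal Go sketch of a VolumeAttachment per the fields documented above. The spec is populated by the Kubernetes system, while the status may only be set by the entity completing the attach or detach operation (the external-attacher). Driver, node, and PV names are hypothetical.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001"

	va := storagev1alpha1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-123abc"},
		Spec: storagev1alpha1.VolumeAttachmentSpec{
			Attacher: "example.csi.k8s.io", // must match the driver's GetPluginName()
			Source:   storagev1alpha1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
			NodeName: "node1",
		},
	}

	// Only the external-attacher may fill in the status; the error message may be
	// logged, so it must not contain sensitive information.
	va.Status.AttachError = &storagev1alpha1.VolumeError{
		Time:    metav1.Now(),
		Message: "attach failed: volume is already attached to another node",
	}

	fmt.Println(va.Name, va.Spec.NodeName, va.Status.AttachError.Message)
}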
type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index a228a3fec7f5..ba6afbd59164 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). 
If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,10 +93,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -105,8 +105,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto index bedbd3183890..b6eec40f3c8c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -49,7 +49,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -82,16 +82,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. 
+ // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -130,10 +129,9 @@ message CSIDriverSpec { // +optional repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +147,7 @@ message CSIDriverSpec { // +optional optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +157,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +181,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -193,7 +192,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -236,7 +235,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -327,7 +326,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -336,7 +335,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -344,7 +343,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -356,7 +355,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -378,7 +377,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -395,36 +394,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. 
optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -440,17 +439,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -467,11 +466,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. 
// +optional @@ -485,7 +484,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -494,7 +493,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -510,39 +509,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -551,11 +550,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -564,7 +563,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. 
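As an illustrative aside (not part of the vendored diff): a minimal Go sketch of a StorageClass using the v1beta1 fields documented above. The provisioner and parameter values are hypothetical; reclaimPolicy defaults to Delete when nil, and mountOptions are not validated, so an invalid option only fails at mount time.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := corev1.PersistentVolumeReclaimDelete
	bindingMode := storagev1beta1.VolumeBindingWaitForFirstConsumer
	expand := true

	sc := storagev1beta1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "localssd"},
		Provisioner:          "example.csi.k8s.io",             // indicates the type of the provisioner
		Parameters:           map[string]string{"type": "ssd"}, // passed to the provisioner when it creates volumes
		ReclaimPolicy:        &reclaim,
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &bindingMode, // VolumeBindingImmediate is used when unset
	}

	fmt.Println(sc.Name, sc.Provisioner, *sc.ReclaimPolicy, *sc.VolumeBindingMode)
}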
message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go index f4d09b641a90..b3129cb3bf12 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. 
// Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +88,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +132,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +151,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +179,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +195,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. 
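The VolumeAttachment spec and status fields whose comments gain field-name prefixes in these hunks (attacher, source, nodeName, attached, attachError) fit together as sketched below; the driver, PV, and node names are hypothetical, and in practice the status is written by the external-attacher rather than by clients.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pvc-0123" // hypothetical persistent volume name

	va := storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-abc123"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			// attacher must match the driver name returned by GetPluginName().
			Attacher: "example.csi.k8s.io",
			// Exactly one member of the source may be set; here a PV name.
			Source:   storagev1beta1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
			NodeName: "node-1",
		},
		// status is normally populated by the external-attacher.
		Status: storagev1beta1.VolumeAttachmentStatus{
			Attached: false,
			AttachError: &storagev1beta1.VolumeError{
				Time:    metav1.Now(),
				Message: "volume is still being detached from another node",
			},
		},
	}
	fmt.Println(va.Spec.NodeName, va.Status.Attached)
}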
// This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +223,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +262,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,16 +302,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. 
+ // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -329,14 +331,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -347,11 +349,9 @@ type CSIDriverSpec struct { // +optional VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -367,7 +367,7 @@ type CSIDriverSpec struct { // +optional StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -377,10 +377,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
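A minimal sketch of a CSIDriver object using the CSIDriverSpec fields described in this hunk (podInfoOnMount, volumeLifecycleModes, storageCapacity, fsGroupPolicy); the driver name and chosen values are assumptions for illustration only.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	attachRequired := true
	podInfoOnMount := true
	storageCapacity := true
	fsGroupPolicy := storagev1beta1.FileFSGroupPolicy

	driver := storagev1beta1.CSIDriver{
		// The object name must equal the driver's GetPluginName() response.
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.k8s.io"},
		Spec: storagev1beta1.CSIDriverSpec{
			AttachRequired: &attachRequired,
			// When true, kubelet passes pod name/namespace/UID as VolumeContext
			// in NodePublishVolume calls.
			PodInfoOnMount: &podInfoOnMount,
			// "Persistent" is the default; "Ephemeral" enables inline CSI volumes in pod specs.
			VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
				storagev1beta1.VolumeLifecyclePersistent,
				storagev1beta1.VolumeLifecycleEphemeral,
			},
			// Opt in to capacity-aware scheduling via CSIStorageCapacity objects.
			StorageCapacity: &storageCapacity,
			FSGroupPolicy:   &fsGroupPolicy,
		},
	}
	fmt.Println(driver.Name, *driver.Spec.PodInfoOnMount)
}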
// The CSI driver should parse and validate the following VolumeContext: @@ -400,7 +401,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -411,7 +412,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -466,12 +467,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -539,7 +539,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -575,7 +575,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. @@ -634,6 +634,7 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -646,7 +647,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. 
This field is @@ -655,7 +656,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -663,7 +664,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -675,7 +676,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -698,12 +699,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ea3c1e4c282c..0f2718b9c145 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. 
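The CSIStorageCapacity fields documented just above (nodeTopology, storageClassName, capacity, maximumVolumeSize) map onto an object roughly as follows; the namespace, zone label, and quantities are illustrative assumptions rather than values from this patch.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	capacity := resource.MustParse("1234Gi")
	maxVolumeSize := resource.MustParse("512Gi")

	csc := storagev1beta1.CSIStorageCapacity{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    "kube-system",
			GenerateName: "csisc-", // recommended prefix; the name itself carries no meaning
		},
		// nodeTopology limits which nodes the reported capacity applies to.
		NodeTopology: &metav1.LabelSelector{
			MatchLabels: map[string]string{"topology.kubernetes.io/zone": "us-east1"},
		},
		StorageClassName: "standard",
		// capacity and maximumVolumeSize mirror the driver's GetCapacityResponse.
		Capacity:          &capacity,
		MaximumVolumeSize: &maxVolumeSize,
	}
	fmt.Println(csc.StorageClassName, csc.Capacity.String())
}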
metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) 
during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. 
Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. 
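The CSINodeDriver fields covered by these doc strings (name, nodeID, topologyKeys, allocatable) and the VolumeNodeResources count limit can be pictured with this minimal sketch; the driver name, node names, topology keys, and count are hypothetical.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	maxVolumes := int32(16)

	csiNode := storagev1beta1.CSINode{
		// A CSINode object is named after the Kubernetes node it describes.
		ObjectMeta: metav1.ObjectMeta{Name: "node1"},
		Spec: storagev1beta1.CSINodeSpec{
			Drivers: []storagev1beta1.CSINodeDriver{{
				// name must equal the driver's GetPluginName() response.
				Name: "example.csi.k8s.io",
				// nodeID is the storage system's own name for this node, e.g. "nodeA" for "node1".
				NodeID: "nodeA",
				// topologyKeys are exposed as node labels and used for topology-aware provisioning.
				TopologyKeys: []string{"company.com/zone", "company.com/region"},
				// allocatable.count caps how many unique volumes of this driver fit on the node.
				Allocatable: &storagev1beta1.VolumeNodeResources{Count: &maxVolumes},
			}},
		},
	}
	fmt.Println(csiNode.Name, *csiNode.Spec.Drivers[0].Allocatable.Count)
}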
It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -107,10 +107,10 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 899d3e8a6670..1bf6b06d47f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,8 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. 
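As a quick illustration of the IsListType contract restated in the reflowed comment above (true when the object carries an Items slice), a minimal sketch using core/v1 types:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
)

func main() {
	fmt.Println(meta.IsListType(&corev1.PodList{})) // true: PodList has an Items slice
	fmt.Println(meta.IsListType(&corev1.Pod{}))     // false: Pod is a single object
}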
func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 9570726a0d27..42a0a82c4d71 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e9017..dff735dcf356 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842b6ce..19d823cef7ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. // According to our measurements this is significantly faster // in codepaths that matter at high scale. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 8910043890c5..5e601424051e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -149,14 +149,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. 
-// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -213,22 +211,15 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// -// value for that key is in Requirement's value set. -// -// (3) The operator is NotIn, Labels has the Requirement's key and -// -// Labels' value for that key is not in Requirement's value set. -// -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// -// Requirement's key. -// -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// 3. The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// 4. The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -872,15 +863,14 @@ func (p *Parser) parseExactValue() (sets.String, error) { // "x in (foo,,baz),y,z notin ()" // // Note: -// -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. 
A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -948,6 +938,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -969,3 +961,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) +} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index b21eb664e3fe..54ccb7a74c77 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. 
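A short sketch of the two selector paths touched here: Parse follows the grammar spelled out above, and the new ValidatedSetSelector wraps an already-validated Set without the copy performed by Set.AsSelectorPreValidated. It assumes a k8s.io/apimachinery version that includes the ValidatedSetSelector added in this hunk; the label keys and values are placeholders.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	podLabels := labels.Set{"app": "web", "tier": "frontend"}

	// Parse accepts comma-separated requirements: inclusion, exclusion, bare KEY, !KEY.
	sel, err := labels.Parse("app in (web, api),tier notin (cache)")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(podLabels)) // true

	// ValidatedSetSelector wraps the Set without copying it; the caller must
	// guarantee the Set is already validated and never mutated afterwards.
	fast := labels.ValidatedSetSelector{"app": "web"}
	fmt.Println(fast.Matches(podLabels)) // true

	// The copying equivalent, for comparison.
	slow := labels.Set{"app": "web"}.AsSelectorPreValidated()
	fmt.Println(slow.Matches(podLabels)) // true
}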
It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 18b25a994b8c..a5b116718d59 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 21944f2d8fda..ff9820842046 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// -// All other callers will be forced to request a Codec directly. +// All other callers will be forced to request a Codec directly. 
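Looking back at the labels package changes earlier in this patch (the tightened Requirement validation list, the Parse grammar notes, and the new ValidatedSetSelector type), a short usage sketch may help; it assumes the k8s.io/apimachinery/pkg/labels package as vendored here, and the label keys and values are purely illustrative:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/labels"
    )

    func main() {
        // String grammar: "in"/"notin", a bare KEY (exists), and !KEY (not exists).
        sel, err := labels.Parse("env in (prod,staging),tier notin (cache),app,!legacy")
        if err != nil {
            panic(err)
        }
        pod := labels.Set{"env": "prod", "tier": "web", "app": "shop"}
        fmt.Println(sel.Matches(pod)) // true

        // Exact-match selection over a Set that is already validated and never
        // mutated: ValidatedSetSelector wraps the map directly, without copying.
        fast := labels.ValidatedSetSelector{"app": "shop", "env": "prod"}
        fmt.Println(fast.Matches(pod)) // true

        // SelectorFromValidatedSet copies the Set, which is the safer choice when
        // the caller may keep mutating the map afterwards.
        safe := labels.SelectorFromValidatedSet(labels.Set{"app": "shop"})
        fmt.Println(safe.Matches(pod)) // true
    }

ValidatedSetSelector skips both validation and the copy made by SelectorFromValidatedSet, which is why its doc comment above insists that the wrapped Set is already validated and never mutated.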
func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index ea0481799b7e..27c3d2d56451 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -184,12 +184,15 @@ func (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.UR //nolint:staticcheck // SA1019 ignore deprecated httputil.NewProxyClientConn proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil) - _, err = proxyClientConn.Do(&proxyReq) + response, err := proxyClientConn.Do(&proxyReq) //nolint:staticcheck // SA1019 ignore deprecated httputil.ErrPersistEOF: it might be // returned from the invocation of proxyClientConn.Do if err != nil && err != httputil.ErrPersistEOF { return nil, err } + if response != nil && response.StatusCode >= 300 || response.StatusCode < 200 { + return nil, fmt.Errorf("CONNECT request to %s returned response: %s", proxyURL.Redacted(), response.Status) + } rwc, _ := proxyClientConn.Hijack() @@ -294,9 +297,10 @@ func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string { if proxyURL == nil || proxyURL.User == nil { return "" } - credentials := proxyURL.User.String() - encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials)) - return fmt.Sprintf("Basic %s", encodedAuth) + username := proxyURL.User.Username() + password, _ := proxyURL.User.Password() + auth := username + ":" + password + return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) } // RoundTrip executes the Request and upgrades it. After a successful upgrade, diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index e39627568227..a20efd187159 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
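The NOTE above is easy to trip over: HasConflicts compares values together with their dynamic types, so both sides should come from the same unmarshaler. A minimal sketch, with illustrative JSON documents that are not part of this PR:

    package main

    import (
        "encoding/json"
        "fmt"

        "k8s.io/apimachinery/pkg/util/mergepatch"
    )

    func main() {
        var left, right map[string]interface{}
        // Decode both sides with encoding/json so every number is a float64 on
        // both sides; mixing decoders can turn equal values into "conflicts".
        _ = json.Unmarshal([]byte(`{"replicas": 2, "image": "app:v1"}`), &left)
        _ = json.Unmarshal([]byte(`{"replicas": 3, "labels": {"team": "a"}}`), &right)

        conflict, err := mergepatch.HasConflicts(left, right)
        fmt.Println(conflict, err) // true <nil>: both patches set "replicas", to different values
    }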
func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index 4443bafd137b..73244449f2c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - apelisse - pwittrock reviewers: - apelisse diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e767092dd877..0b8a6cb354ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. + errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 137627b4050c..4e531b330f45 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -223,6 +223,33 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} +} + +var _ context.Context = channelContext{} + +// channelContext will behave as if the context were cancelled when stopCh is +// closed. +type channelContext struct { + stopCh <-chan struct{} +} + +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } +} +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } + // runConditionWithCrashProtection runs a ConditionFunc with crash protection func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) @@ -290,25 +317,6 @@ func (b *Backoff) Step() time.Duration { return duration } -// ContextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. 
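The replacement ContextForChannel above no longer spawns a goroutine or returns a CancelFunc; the context simply mirrors the stop channel. A small sketch of adapting a legacy stop channel to the context-based helpers, assuming the new signature introduced in this hunk:

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        stopCh := make(chan struct{})
        ctx := wait.ContextForChannel(stopCh) // no CancelFunc to remember to call
        fmt.Println(ctx.Err())                // <nil> while stopCh is open

        go func() {
            time.Sleep(50 * time.Millisecond)
            close(stopCh)
        }()

        // Any context-aware helper can now consume the stop channel directly.
        err := wait.PollUntilWithContext(ctx, 10*time.Millisecond,
            func(ctx context.Context) (bool, error) {
                return false, nil // never done; returns once stopCh is closed
            })
        fmt.Println(err)       // timed out waiting for the condition
        fmt.Println(ctx.Err()) // context.Canceled
    }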
-func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides // an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() // is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in @@ -466,9 +474,7 @@ func PollWithContext(ctx context.Context, interval, timeout time.Duration, condi // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) } // PollUntilWithContext tries a condition func until it returns true, @@ -533,9 +539,7 @@ func PollImmediateWithContext(ctx context.Context, interval, timeout time.Durati // PollImmediateUntil runs the 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) } // PollImmediateUntilWithContext tries a condition func until it returns true, @@ -577,7 +581,7 @@ func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duratio // wait: user specified WaitFunc function that controls at what interval the condition // function should be invoked periodically and whether it is bound by a timeout. // condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { +func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { if immediate { done, err := runConditionWithCrashProtectionWithContext(ctx, condition) if err != nil { @@ -593,55 +597,36 @@ func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, conditi // returning ctx.Err() will break backward compatibility return ErrWaitTimeout default: - return WaitForWithContext(ctx, wait, condition) + return waitForWithContext(ctx, wait, condition) } } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
-type WaitFunc func(done <-chan struct{}) <-chan struct{} +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. -// -// WaitFor gets a channel from 'wait()”, and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. // -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := ContextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed when the legacy Poll methods are removed. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()”, and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -651,8 +636,10 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. +func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -686,8 +673,8 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit // // Output ticks are not buffered. 
If the channel is not ready to receive an // item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { ch := make(chan struct{}) go func() { @@ -729,7 +716,7 @@ func poller(interval, timeout time.Duration) WaitWithContextFunc { // ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never // exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { for backoff.Steps > 0 { select { case <-ctx.Done(): @@ -737,7 +724,7 @@ func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, conditi default: } - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go index ea58e6c32679..daee67859918 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -19,7 +19,6 @@ package configuration import ( "fmt" "sort" - "sync/atomic" "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/labels" @@ -29,18 +28,14 @@ import ( "k8s.io/client-go/informers" admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache/synctrack" ) // mutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. type mutatingWebhookConfigurationManager struct { - configuration *atomic.Value - lister admissionregistrationlisters.MutatingWebhookConfigurationLister - hasSynced func() bool - // initialConfigurationSynced tracks if - // the existing webhook configs have been synced (honored) by the - // manager at startup-- the informer has synced and either has no items - // or has finished executing updateConfiguration() once. 
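Returning briefly to the wait package hunk above: ExponentialBackoffWithContext now takes a ConditionWithContextFunc, so the condition receives the caller's context rather than running context-free. A sketch of a caller updated for the new signature; the condition body is illustrative only:

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4}
        attempts := 0

        // The condition can forward ctx to API calls and stop early on cancellation.
        err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
            attempts++
            return attempts >= 3, nil // pretend the third try succeeds
        })
        fmt.Println(attempts, err) // 3 <nil>
    }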
- initialConfigurationSynced *atomic.Bool + lister admissionregistrationlisters.MutatingWebhookConfigurationLister + hasSynced func() bool + lazy synctrack.Lazy[[]webhook.WebhookAccessor] } var _ generic.Source = &mutatingWebhookConfigurationManager{} @@ -48,62 +43,39 @@ var _ generic.Source = &mutatingWebhookConfigurationManager{} func NewMutatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { informer := f.Admissionregistration().V1().MutatingWebhookConfigurations() manager := &mutatingWebhookConfigurationManager{ - configuration: &atomic.Value{}, - lister: informer.Lister(), - hasSynced: informer.Informer().HasSynced, - initialConfigurationSynced: &atomic.Bool{}, + lister: informer.Lister(), } + manager.lazy.Evaluate = manager.getConfiguration - // Start with an empty list - manager.configuration.Store([]webhook.WebhookAccessor{}) - manager.initialConfigurationSynced.Store(false) - - // On any change, rebuild the config - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { manager.updateConfiguration() }, - UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, - DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + handle, _ := informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.lazy.Notify() }, + UpdateFunc: func(_, _ interface{}) { manager.lazy.Notify() }, + DeleteFunc: func(_ interface{}) { manager.lazy.Notify() }, }) + manager.hasSynced = handle.HasSynced return manager } // Webhooks returns the merged MutatingWebhookConfiguration. func (m *mutatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { - return m.configuration.Load().([]webhook.WebhookAccessor) -} - -// HasSynced returns true when the manager is synced with existing webhookconfig -// objects at startup-- which means the informer is synced and either has no items -// or updateConfiguration() has completed. -func (m *mutatingWebhookConfigurationManager) HasSynced() bool { - if !m.hasSynced() { - return false - } - if m.initialConfigurationSynced.Load() { - // the informer has synced and configuration has been updated - return true - } - if configurations, err := m.lister.List(labels.Everything()); err == nil && len(configurations) == 0 { - // the empty list we initially stored is valid to use. - // Setting initialConfigurationSynced to true, so subsequent checks - // would be able to take the fast path on the atomic boolean in a - // cluster without any admission webhooks configured. - m.initialConfigurationSynced.Store(true) - // the informer has synced and we don't have any items - return true + out, err := m.lazy.Get() + if err != nil { + utilruntime.HandleError(fmt.Errorf("error getting webhook configuration: %v", err)) } - return false + return out } -func (m *mutatingWebhookConfigurationManager) updateConfiguration() { +// HasSynced returns true if the initial set of mutating webhook configurations +// has been loaded. 
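The rewritten manager above pushes its caching into synctrack.Lazy: event handlers call Notify(), readers call Get(), and Evaluate recomputes the merged list only when something changed since the last read. A rough standalone sketch of that pattern, not the manager itself; the string slice stands in for the webhook accessors:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/cache/synctrack"
    )

    func main() {
        evaluations := 0
        store := []string{}

        lazy := synctrack.Lazy[[]string]{
            Evaluate: func() ([]string, error) {
                evaluations++ // runs only when a Get follows a Notify
                out := make([]string, len(store))
                copy(out, store)
                return out, nil
            },
        }

        store = append(store, "webhook-a")
        lazy.Notify()               // an event handler marks the cached value stale
        v, _ := lazy.Get()          // first reader triggers Evaluate
        fmt.Println(v, evaluations) // [webhook-a] 1

        v, _ = lazy.Get()           // nothing changed since: served from the cache
        fmt.Println(v, evaluations) // [webhook-a] 1
    }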
+func (m *mutatingWebhookConfigurationManager) HasSynced() bool { return m.hasSynced() } + +func (m *mutatingWebhookConfigurationManager) getConfiguration() ([]webhook.WebhookAccessor, error) { configurations, err := m.lister.List(labels.Everything()) if err != nil { - utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) - return + return []webhook.WebhookAccessor{}, err } - m.configuration.Store(mergeMutatingWebhookConfigurations(configurations)) - m.initialConfigurationSynced.Store(true) + return mergeMutatingWebhookConfigurations(configurations), nil } func mergeMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go index 00f954251f46..f318b5012938 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -19,7 +19,6 @@ package configuration import ( "fmt" "sort" - "sync/atomic" "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/labels" @@ -29,18 +28,14 @@ import ( "k8s.io/client-go/informers" admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache/synctrack" ) // validatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. type validatingWebhookConfigurationManager struct { - configuration *atomic.Value - lister admissionregistrationlisters.ValidatingWebhookConfigurationLister - hasSynced func() bool - // initialConfigurationSynced tracks if - // the existing webhook configs have been synced (honored) by the - // manager at startup-- the informer has synced and either has no items - // or has finished executing updateConfiguration() once. 
- initialConfigurationSynced *atomic.Bool + lister admissionregistrationlisters.ValidatingWebhookConfigurationLister + hasSynced func() bool + lazy synctrack.Lazy[[]webhook.WebhookAccessor] } var _ generic.Source = &validatingWebhookConfigurationManager{} @@ -48,63 +43,39 @@ func NewValidatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { informer := f.Admissionregistration().V1().ValidatingWebhookConfigurations() manager := &validatingWebhookConfigurationManager{ - configuration: &atomic.Value{}, - lister: informer.Lister(), - hasSynced: informer.Informer().HasSynced, - initialConfigurationSynced: &atomic.Bool{}, + lister: informer.Lister(), } + manager.lazy.Evaluate = manager.getConfiguration - // Start with an empty list - manager.configuration.Store([]webhook.WebhookAccessor{}) - manager.initialConfigurationSynced.Store(false) - - // On any change, rebuild the config - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { manager.updateConfiguration() }, - UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, - DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + handle, _ := informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.lazy.Notify() }, + UpdateFunc: func(_, _ interface{}) { manager.lazy.Notify() }, + DeleteFunc: func(_ interface{}) { manager.lazy.Notify() }, }) + manager.hasSynced = handle.HasSynced return manager } // Webhooks returns the merged ValidatingWebhookConfiguration. func (v *validatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { - return v.configuration.Load().([]webhook.WebhookAccessor) -} - -// HasSynced returns true when the manager is synced with existing webhookconfig -// objects at startup-- which means the informer is synced and either has no items -// or updateConfiguration() has completed. -func (v *validatingWebhookConfigurationManager) HasSynced() bool { - if !v.hasSynced() { - return false - } - if v.initialConfigurationSynced.Load() { - // the informer has synced and configuration has been updated - return true - } - if configurations, err := v.lister.List(labels.Everything()); err == nil && len(configurations) == 0 { - // the empty list we initially stored is valid to use. - // Setting initialConfigurationSynced to true, so subsequent checks - // would be able to take the fast path on the atomic boolean in a - // cluster without any admission webhooks configured. - v.initialConfigurationSynced.Store(true) - // the informer has synced and we don't have any items - return true + out, err := v.lazy.Get() + if err != nil { + utilruntime.HandleError(fmt.Errorf("error getting webhook configuration: %v", err)) } - return false - + return out } -func (v *validatingWebhookConfigurationManager) updateConfiguration() { +// HasSynced returns true if the initial set of validating webhook configurations +// has been loaded.
+func (v *validatingWebhookConfigurationManager) HasSynced() bool { return v.hasSynced() } + +func (v *validatingWebhookConfigurationManager) getConfiguration() ([]webhook.WebhookAccessor, error) { configurations, err := v.lister.List(labels.Everything()) if err != nil { - utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) - return + return []webhook.WebhookAccessor{}, err } - v.configuration.Store(mergeValidatingWebhookConfigurations(configurations)) - v.initialConfigurationSynced.Store(true) + return mergeValidatingWebhookConfigurations(configurations), nil } func mergeValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go index 4398aa6b133d..6767c7d11fba 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go @@ -22,6 +22,7 @@ import ( "fmt" "sync" "sync/atomic" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,12 +34,14 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" celmetrics "k8s.io/apiserver/pkg/admission/cel" "k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic" "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" ) var _ CELPolicyEvaluator = &celAdmissionController{} @@ -46,44 +49,24 @@ var _ CELPolicyEvaluator = &celAdmissionController{} // celAdmissionController is the top-level controller for admission control using CEL // it is responsible for watching policy definitions, bindings, and config param CRDs type celAdmissionController struct { - // Context under which the controller runs - runningContext context.Context - - policyDefinitionsController generic.Controller[*v1alpha1.ValidatingAdmissionPolicy] - policyBindingController generic.Controller[*v1alpha1.ValidatingAdmissionPolicyBinding] - - // dynamicclient used to create informers to watch the param crd types - dynamicClient dynamic.Interface - restMapper meta.RESTMapper - - // Provided to the policy's Compile function as an injected dependency to - // assist with compiling its expressions to CEL - validatorCompiler ValidatorCompiler - - // Lock which protects: - // - definitionInfo - // - bindingInfos - // - paramCRDControllers - // - definitionsToBindings - // All other fields should be assumed constant - mutex sync.RWMutex - - // controller and metadata - paramsCRDControllers map[v1alpha1.ParamKind]*paramInfo - - // Index for each definition namespace/name, contains all binding - // namespace/names known to exist for that definition - definitionInfo map[namespacedName]*definitionInfo - - // Index for each bindings namespace/name. Contains compiled templates - // for the binding depending on the policy/param combination. - bindingInfos map[namespacedName]*bindingInfo - - // Map from namespace/name of a definition to a set of namespace/name - // of bindings which depend on it. 
- // All keys must have at least one dependent binding - // All binding names MUST exist as a key bindingInfos - definitionsToBindings map[namespacedName]sets.Set[namespacedName] + // Controller which manages book-keeping for the cluster's dynamic policy + // information. + policyController *policyController + + // atomic []policyData + // list of every known policy definition, and all informatoin required to + // validate its bindings against an object. + // A snapshot of the current policy configuration is synced with this field + // asynchronously + definitions atomic.Value +} + +// Everything someone might need to validate a single ValidatingPolicyDefinition +// against all of its registered bindings. +type policyData struct { + definitionInfo + paramController generic.Controller[*unstructured.Unstructured] + bindings []bindingInfo } // namespaceName is used as a key in definitionInfo and bindingInfos @@ -104,7 +87,7 @@ type definitionInfo struct { type bindingInfo struct { // Compiled CEL expression turned into an validator - validator atomic.Pointer[Validator] + validator Validator // Last value seen by this controller to be used in policy enforcement // May not be nil @@ -129,65 +112,44 @@ func NewAdmissionController( restMapper meta.RESTMapper, dynamicClient dynamic.Interface, ) CELPolicyEvaluator { - matcher := matching.NewMatcher(informerFactory.Core().V1().Namespaces().Lister(), client) - validatorCompiler := &CELValidatorCompiler{ - Matcher: matcher, + return &celAdmissionController{ + definitions: atomic.Value{}, + policyController: newPolicyController( + restMapper, + dynamicClient, + &CELValidatorCompiler{ + Matcher: matching.NewMatcher(informerFactory.Core().V1().Namespaces().Lister(), client), + }, + generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicy]( + informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()), + generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicyBinding]( + informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()), + ), } - c := &celAdmissionController{ - definitionInfo: make(map[namespacedName]*definitionInfo), - bindingInfos: make(map[namespacedName]*bindingInfo), - paramsCRDControllers: make(map[v1alpha1.ParamKind]*paramInfo), - definitionsToBindings: make(map[namespacedName]sets.Set[namespacedName]), - dynamicClient: dynamicClient, - validatorCompiler: validatorCompiler, - restMapper: restMapper, - } - - c.policyDefinitionsController = generic.NewController( - generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicy]( - informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()), - c.reconcilePolicyDefinition, - generic.ControllerOptions{ - Workers: 1, - Name: "cel-policy-definitions", - }, - ) - c.policyBindingController = generic.NewController( - generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicyBinding]( - informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()), - c.reconcilePolicyBinding, - generic.ControllerOptions{ - Workers: 1, - Name: "cel-policy-bindings", - }, - ) - return c } func (c *celAdmissionController) Run(stopCh <-chan struct{}) { - if c.runningContext != nil { - return - } - ctx, cancel := context.WithCancel(context.Background()) - - c.runningContext = ctx - defer func() { - c.runningContext = nil - }() - wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() - c.policyDefinitionsController.Run(ctx) + c.policyController.Run(ctx) }() wg.Add(1) go func() { defer 
wg.Done() - c.policyBindingController.Run(ctx) + + // Wait indefinitely until policies/bindings are listed & handled before + // allowing policies to be refreshed + if !cache.WaitForNamedCacheSync("cel-admission-controller", ctx.Done(), c.policyController.HasSynced) { + return + } + + // Loop every 1 second until context is cancelled, refreshing policies + wait.Until(c.refreshPolicies, 1*time.Second, ctx.Done()) }() <-stopCh @@ -200,8 +162,9 @@ func (c *celAdmissionController) Validate( a admission.Attributes, o admission.ObjectInterfaces, ) (err error) { - c.mutex.RLock() - defer c.mutex.RUnlock() + if !c.HasSynced() { + return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) + } var deniedDecisions []policyDecisionWithMetadata @@ -245,9 +208,11 @@ func (c *celAdmissionController) Validate( }) } } - for definitionNamespacedName, definitionInfo := range c.definitionInfo { + policyDatas := c.definitions.Load().([]policyData) + + for _, definitionInfo := range policyDatas { definition := definitionInfo.lastReconciledValue - matches, matchKind, err := c.validatorCompiler.DefinitionMatches(a, o, definition) + matches, matchKind, err := c.policyController.DefinitionMatches(a, o, definition) if err != nil { // Configuration error. addConfigError(err, definition, nil) @@ -262,17 +227,11 @@ func (c *celAdmissionController) Validate( continue } - dependentBindings := c.definitionsToBindings[definitionNamespacedName] - if len(dependentBindings) == 0 { - continue - } - - for namespacedBindingName := range dependentBindings { + for _, bindingInfo := range definitionInfo.bindings { // If the key is inside dependentBindings, there is guaranteed to // be a bindingInfo for it - bindingInfo := c.bindingInfos[namespacedBindingName] binding := bindingInfo.lastReconciledValue - matches, err := c.validatorCompiler.BindingMatches(a, o, binding) + matches, err := c.policyController.BindingMatches(a, o, binding) if err != nil { // Configuration error. addConfigError(err, definition, binding) @@ -289,11 +248,8 @@ func (c *celAdmissionController) Validate( paramKind := definition.Spec.ParamKind paramRef := binding.Spec.ParamRef if paramKind != nil && paramRef != nil { - - // Find the params referred by the binding by looking its name up - // in our informer for its CRD - paramInfo, ok := c.paramsCRDControllers[*paramKind] - if !ok { + paramController := definitionInfo.paramController + if paramController == nil { addConfigError(fmt.Errorf("paramKind kind `%v` not known", paramKind.String()), definition, binding) continue @@ -302,18 +258,19 @@ func (c *celAdmissionController) Validate( // If the param informer for this admission policy has not yet // had time to perform an initial listing, don't attempt to use // it. 
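The Validate path above no longer takes the controller's lock; it reads an immutable policyData snapshot from an atomic.Value that Run refreshes once per second after the informer caches sync. A generic sketch of that snapshot-and-refresh pattern, with illustrative names that are not part of this PR:

    package main

    import (
        "context"
        "fmt"
        "sync/atomic"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    type snapshotCache struct {
        latest    atomic.Value    // holds []string, a stand-in for []policyData
        recompute func() []string // plays the role of latestPolicyData above
    }

    func (s *snapshotCache) run(ctx context.Context) {
        // Mirrors wait.Until(c.refreshPolicies, 1*time.Second, ctx.Done()).
        wait.Until(func() { s.latest.Store(s.recompute()) }, time.Second, ctx.Done())
    }

    func (s *snapshotCache) get() []string {
        v, _ := s.latest.Load().([]string)
        return v // nil until the first refresh, i.e. "not synced yet"
    }

    func main() {
        c := &snapshotCache{recompute: func() []string { return []string{"policy-a"} }}
        ctx, cancel := context.WithCancel(context.Background())
        go c.run(ctx)

        time.Sleep(50 * time.Millisecond) // wait.Until runs the function immediately, then once per period
        fmt.Println(c.get())              // [policy-a]
        cancel()
    }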
- //!TOOD(alexzielenski): add a wait for a very short amount of - // time for the cache to sync - if !paramInfo.controller.HasSynced() { + timeoutCtx, cancel := context.WithTimeout(c.policyController.context, 1*time.Second) + defer cancel() + + if !cache.WaitForCacheSync(timeoutCtx.Done(), paramController.HasSynced) { addConfigError(fmt.Errorf("paramKind kind `%v` not yet synced to use for admission", paramKind.String()), definition, binding) continue } if len(paramRef.Namespace) == 0 { - param, err = paramInfo.controller.Informer().Get(paramRef.Name) + param, err = paramController.Informer().Get(paramRef.Name) } else { - param, err = paramInfo.controller.Informer().Namespaced(paramRef.Namespace).Get(paramRef.Name) + param, err = paramController.Informer().Namespaced(paramRef.Namespace).Get(paramRef.Name) } if err != nil { @@ -335,17 +292,7 @@ func (c *celAdmissionController) Validate( continue } } - - validator := bindingInfo.validator.Load() - if validator == nil { - // Compile policy definition using binding - newValidator := c.validatorCompiler.Compile(definition) - validator = &newValidator - - bindingInfo.validator.Store(validator) - } - - decisions, err := (*validator).Validate(a, o, param, matchKind) + decisions, err := bindingInfo.validator.Validate(a, o, param, matchKind) if err != nil { // runtime error. Apply failure policy wrappedError := fmt.Errorf("failed to evaluate CEL expression: %w", err) @@ -397,10 +344,13 @@ func (c *celAdmissionController) Validate( } func (c *celAdmissionController) HasSynced() bool { - return c.policyBindingController.HasSynced() && - c.policyDefinitionsController.HasSynced() + return c.policyController.HasSynced() && c.definitions.Load() != nil } func (c *celAdmissionController) ValidateInitialization() error { - return c.validatorCompiler.ValidateInitialization() + return c.policyController.ValidateInitialization() +} + +func (c *celAdmissionController) refreshPolicies() { + c.definitions.Store(c.policyController.latestPolicyData()) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index cbc89b518d65..bb2289f1b633 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -19,23 +19,133 @@ package validatingadmissionpolicy import ( "context" "fmt" + "sync" "time" "k8s.io/api/admissionregistration/v1alpha1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" celmetrics "k8s.io/apiserver/pkg/admission/cel" "k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic" + "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/tools/cache" ) -func (c *celAdmissionController) reconcilePolicyDefinition(namespace, name string, definition *v1alpha1.ValidatingAdmissionPolicy) error { +type policyController struct { + once sync.Once + context context.Context + dynamicClient dynamic.Interface + restMapper meta.RESTMapper + policyDefinitionsController generic.Controller[*v1alpha1.ValidatingAdmissionPolicy] + policyBindingController 
generic.Controller[*v1alpha1.ValidatingAdmissionPolicyBinding] + + // Provided to the policy's Compile function as an injected dependency to + // assist with compiling its expressions to CEL + ValidatorCompiler + + // Lock which protects: + // - cachedPolicies + // - paramCRDControllers + // - definitionInfo + // - bindingInfos + // - definitionsToBindings + // All other fields should be assumed constant + mutex sync.RWMutex + + cachedPolicies []policyData + + // controller and metadata + paramsCRDControllers map[v1alpha1.ParamKind]*paramInfo + + // Index for each definition namespace/name, contains all binding + // namespace/names known to exist for that definition + definitionInfo map[namespacedName]*definitionInfo + + // Index for each bindings namespace/name. Contains compiled templates + // for the binding depending on the policy/param combination. + bindingInfos map[namespacedName]*bindingInfo + + // Map from namespace/name of a definition to a set of namespace/name + // of bindings which depend on it. + // All keys must have at least one dependent binding + // All binding names MUST exist as a key bindingInfos + definitionsToBindings map[namespacedName]sets.Set[namespacedName] +} + +func newPolicyController( + restMapper meta.RESTMapper, + dynamicClient dynamic.Interface, + validatorCompiler ValidatorCompiler, + policiesInformer generic.Informer[*v1alpha1.ValidatingAdmissionPolicy], + bindingsInformer generic.Informer[*v1alpha1.ValidatingAdmissionPolicyBinding], +) *policyController { + res := &policyController{} + *res = policyController{ + ValidatorCompiler: validatorCompiler, + definitionInfo: make(map[namespacedName]*definitionInfo), + bindingInfos: make(map[namespacedName]*bindingInfo), + paramsCRDControllers: make(map[v1alpha1.ParamKind]*paramInfo), + definitionsToBindings: make(map[namespacedName]sets.Set[namespacedName]), + policyDefinitionsController: generic.NewController( + policiesInformer, + res.reconcilePolicyDefinition, + generic.ControllerOptions{ + Workers: 1, + Name: "cel-policy-definitions", + }, + ), + policyBindingController: generic.NewController( + bindingsInformer, + res.reconcilePolicyBinding, + generic.ControllerOptions{ + Workers: 1, + Name: "cel-policy-bindings", + }, + ), + restMapper: restMapper, + dynamicClient: dynamicClient, + } + return res +} + +func (c *policyController) Run(ctx context.Context) { + // Only support being run once + c.once.Do(func() { + c.context = ctx + + wg := sync.WaitGroup{} + + wg.Add(1) + go func() { + defer wg.Done() + c.policyDefinitionsController.Run(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + c.policyBindingController.Run(ctx) + }() + + <-ctx.Done() + wg.Wait() + }) +} + +func (c *policyController) HasSynced() bool { + return c.policyDefinitionsController.HasSynced() && c.policyBindingController.HasSynced() +} + +func (c *policyController) reconcilePolicyDefinition(namespace, name string, definition *v1alpha1.ValidatingAdmissionPolicy) error { c.mutex.Lock() defer c.mutex.Unlock() + c.cachedPolicies = nil // invalidate cachedPolicies + // Namespace for policydefinition is empty. nn := getNamespaceName(namespace, name) info, ok := c.definitionInfo[nn] @@ -75,7 +185,7 @@ func (c *celAdmissionController) reconcilePolicyDefinition(namespace, name strin // definition has changed. 
for key := range c.definitionsToBindings[nn] { bindingInfo := c.bindingInfos[key] - bindingInfo.validator.Store(nil) + bindingInfo.validator = nil c.bindingInfos[key] = bindingInfo } @@ -119,16 +229,20 @@ func (c *celAdmissionController) reconcilePolicyDefinition(namespace, name strin return info.configurationError } - // Start watching the param CRD - if _, ok := c.paramsCRDControllers[*paramSource]; !ok { - instanceContext, instanceCancel := context.WithCancel(c.runningContext) + if info, ok := c.paramsCRDControllers[*paramSource]; ok { + // If a param controller is already active for this paramsource, make + // sure it is tracking this policy's dependency upon it + info.dependentDefinitions.Insert(nn) + + } else { + instanceContext, instanceCancel := context.WithCancel(c.context) // Watch for new instances of this policy informer := dynamicinformer.NewFilteredDynamicInformer( c.dynamicClient, paramsGVR.Resource, corev1.NamespaceAll, - 30*time.Second, + 30*time.Second, // TODO: do we really need to ever resync these? cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, nil, ) @@ -155,10 +269,12 @@ func (c *celAdmissionController) reconcilePolicyDefinition(namespace, name strin return nil } -func (c *celAdmissionController) reconcilePolicyBinding(namespace, name string, binding *v1alpha1.ValidatingAdmissionPolicyBinding) error { +func (c *policyController) reconcilePolicyBinding(namespace, name string, binding *v1alpha1.ValidatingAdmissionPolicyBinding) error { c.mutex.Lock() defer c.mutex.Unlock() + c.cachedPolicies = nil // invalidate cachedPolicies + // Namespace for PolicyBinding is empty. In the future a namespaced binding // may be added // https://github.com/kubernetes/enhancements/blob/bf5c3c81ea2081d60c1dc7c832faa98479e06209/keps/sig-api-machinery/3488-cel-admission-control/README.md?plain=1#L1042 @@ -208,12 +324,12 @@ func (c *celAdmissionController) reconcilePolicyBinding(namespace, name string, } // Remove compiled template for old binding - info.validator.Store(nil) + info.validator = nil info.lastReconciledValue = binding return nil } -func (c *celAdmissionController) reconcileParams(namespace, name string, params *unstructured.Unstructured) error { +func (c *policyController) reconcileParams(namespace, name string, params *unstructured.Unstructured) error { // Do nothing. 
// When we add informational type checking we will need to compile in the // reconcile loops instead of lazily so we can add compiler errors / type @@ -221,6 +337,52 @@ func (c *celAdmissionController) reconcileParams(namespace, name string, params return nil } +// Fetches the latest set of policy data or recalculates it if it has changed +// since it was last fetched +func (c *policyController) latestPolicyData() []policyData { + existing := func() []policyData { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.cachedPolicies + }() + + if existing != nil { + return existing + } + + c.mutex.Lock() + defer c.mutex.Unlock() + + var res []policyData + for definitionNN, definitionInfo := range c.definitionInfo { + var bindingInfos []bindingInfo + for bindingNN := range c.definitionsToBindings[definitionNN] { + bindingInfo := c.bindingInfos[bindingNN] + if bindingInfo.validator == nil && definitionInfo.configurationError == nil { + bindingInfo.validator = c.ValidatorCompiler.Compile(definitionInfo.lastReconciledValue) + } + bindingInfos = append(bindingInfos, *bindingInfo) + } + + var paramController generic.Controller[*unstructured.Unstructured] + if paramKind := definitionInfo.lastReconciledValue.Spec.ParamKind; paramKind != nil { + if info, ok := c.paramsCRDControllers[*paramKind]; ok { + paramController = info.controller + } + } + + res = append(res, policyData{ + definitionInfo: *definitionInfo, + paramController: paramController, + bindings: bindingInfos, + }) + } + + c.cachedPolicies = res + return res +} + func getNamespaceName(namespace, name string) namespacedName { return namespacedName{ namespace: namespace, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic/controller.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic/controller.go index bd5ea818d67c..4334c0dd82c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic/controller.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -30,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" ) @@ -45,6 +47,11 @@ type controller[T runtime.Object] struct { reconciler func(namespace, name string, newObj T) error options ControllerOptions + + // must hold a func() bool or nil + notificationsDelivered atomic.Value + + hasProcessed synctrack.AsyncTracker[string] } type ControllerOptions struct { @@ -69,12 +76,20 @@ func NewController[T runtime.Object]( options.Name = fmt.Sprintf("%T-controller", *new(T)) } - return &controller[T]{ + c := &controller[T]{ options: options, informer: informer, reconciler: reconciler, queue: nil, } + c.hasProcessed.UpstreamHasSynced = func() bool { + f := c.notificationsDelivered.Load() + if f == nil { + return false + } + return f.(func() bool)() + } + return c } // Runs the controller and returns an error explaining why running was stopped. 
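The generic controller above now answers HasSynced through a synctrack.AsyncTracker: keys delivered in the informer's initial list are Start()ed as they are enqueued and Finished() after reconciliation (as the rest of this file's hunks show), and HasSynced() only reports true once the upstream handler has synced and all of those keys are done. A rough sketch of the tracker's contract, assuming the client-go synctrack API used above:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/cache/synctrack"
    )

    func main() {
        upstreamSynced := false

        tracker := synctrack.AsyncTracker[string]{
            // Stand-in for the event handler registration's HasSynced.
            UpstreamHasSynced: func() bool { return upstreamSynced },
        }

        // An item delivered in the informer's initial list is Start()ed as it is
        // enqueued...
        tracker.Start("default/policy-a")
        upstreamSynced = true
        fmt.Println(tracker.HasSynced()) // false: the initial item is still in flight

        // ...and Finished() once the worker has reconciled it.
        tracker.Finished("default/policy-a")
        fmt.Println(tracker.HasSynced()) // true
    }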
@@ -92,20 +107,22 @@ func (c *controller[T]) Run(ctx context.Context) error { // would never shut down the workqueue defer c.queue.ShutDown() - enqueue := func(obj interface{}) { + enqueue := func(obj interface{}, isInInitialList bool) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return } + if isInInitialList { + c.hasProcessed.Start(key) + } + c.queue.Add(key) } - registration, err := c.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - enqueue(obj) - }, + registration, err := c.informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{ + AddFunc: enqueue, UpdateFunc: func(oldObj, newObj interface{}) { oldMeta, err1 := meta.Accessor(oldObj) newMeta, err2 := meta.Accessor(newObj) @@ -126,11 +143,11 @@ func (c *controller[T]) Run(ctx context.Context) error { return } - enqueue(newObj) + enqueue(newObj, false) }, DeleteFunc: func(obj interface{}) { // Enqueue - enqueue(obj) + enqueue(obj, false) }, }) @@ -139,9 +156,12 @@ func (c *controller[T]) Run(ctx context.Context) error { return err } + c.notificationsDelivered.Store(registration.HasSynced) + // Make sure event handler is removed from informer in case return early from // an error defer func() { + c.notificationsDelivered.Store(func() bool { return false }) // Remove event handler and Handle Error here. Error should only be raised // for improper usage of event handler API. if err := c.informer.RemoveEventHandler(registration); err != nil { @@ -166,8 +186,8 @@ func (c *controller[T]) Run(ctx context.Context) error { for i := uint(0); i < c.options.Workers; i++ { waitGroup.Add(1) go func() { + defer waitGroup.Done() wait.Until(c.runWorker, time.Second, ctx.Done()) - waitGroup.Done() }() } @@ -188,7 +208,7 @@ func (c *controller[T]) Run(ctx context.Context) error { } func (c *controller[T]) HasSynced() bool { - return c.informer.HasSynced() + return c.hasProcessed.HasSynced() } func (c *controller[T]) runWorker() { @@ -220,6 +240,7 @@ func (c *controller[T]) runWorker() { // but the key is invalid so there is no point in doing that) return fmt.Errorf("expected string in workqueue but got %#v", obj) } + defer c.hasProcessed.Finished(key) if err := c.reconcile(key); err != nil { // Put the item back on the workqueue to handle any transient errors. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go index 72107fe66349..e81f34573bf0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go @@ -24,7 +24,23 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// EncryptionConfiguration stores the complete configuration for encryption providers. +/* +EncryptionConfiguration stores the complete configuration for encryption providers. +example: + + kind: EncryptionConfiguration + apiVersion: apiserver.config.k8s.io/v1 + resources: + - resources: + - secrets + - configmaps + - pandas.awesome.bears.example + providers: + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== +*/ type EncryptionConfiguration struct { metav1.TypeMeta // resources is a list containing resources, and their corresponding encryption providers. @@ -33,7 +49,8 @@ type EncryptionConfiguration struct { // ResourceConfiguration stores per resource configuration. 
type ResourceConfiguration struct { - // resources is a list of kubernetes resources which have to be encrypted. + // resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource. + // eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas) Resources []string // providers is a list of transformers to be used for reading and writing the resources to disk. // eg: aesgcm, aescbc, secretbox, identity. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go index 23dab942ea3e..522fe3167cba 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go @@ -24,7 +24,23 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// EncryptionConfiguration stores the complete configuration for encryption providers. +/* +EncryptionConfiguration stores the complete configuration for encryption providers. +example: + + kind: EncryptionConfiguration + apiVersion: apiserver.config.k8s.io/v1 + resources: + - resources: + - secrets + - configmaps + - pandas.awesome.bears.example + providers: + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== +*/ type EncryptionConfiguration struct { metav1.TypeMeta // resources is a list containing resources, and their corresponding encryption providers. @@ -33,7 +49,8 @@ type EncryptionConfiguration struct { // ResourceConfiguration stores per resource configuration. type ResourceConfiguration struct { - // resources is a list of kubernetes resources which have to be encrypted. + // resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource. + // eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas) Resources []string `json:"resources"` // providers is a list of transformers to be used for reading and writing the resources to disk. // eg: aesgcm, aescbc, secretbox, identity. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/evaluator.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/evaluator.go index 93907dc5f33f..f9664fef69c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/evaluator.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/evaluator.go @@ -25,6 +25,9 @@ import ( // a given request. PolicyRuleEvaluator evaluates the audit policy against the // authorizer attributes and returns a RequestAuditConfig that applies to the request. type RequestAuditConfig struct { + // Level at which the request is being audited at + Level audit.Level + // OmitStages is the stages that need to be omitted from being audited. OmitStages []audit.Stage @@ -33,21 +36,10 @@ type RequestAuditConfig struct { OmitManagedFields bool } -// RequestAuditConfigWithLevel includes Level at which the request is being audited. -// PolicyRuleEvaluator evaluates the audit configuration for a request -// against the authorizer attributes and returns an RequestAuditConfigWithLevel -// that applies to the request. 
-type RequestAuditConfigWithLevel struct { - RequestAuditConfig - - // Level at which the request is being audited at - Level audit.Level -} - // PolicyRuleEvaluator exposes methods for evaluating the policy rules. type PolicyRuleEvaluator interface { // EvaluatePolicyRule evaluates the audit policy of the apiserver against // the given authorizer attributes and returns the audit configuration that // is applicable to the given equest. - EvaluatePolicyRule(authorizer.Attributes) RequestAuditConfigWithLevel + EvaluatePolicyRule(authorizer.Attributes) RequestAuditConfig } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go index 6a98ff4ac04b..cd6ec92bc8ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go @@ -61,25 +61,21 @@ type policyRuleEvaluator struct { audit.Policy } -func (p *policyRuleEvaluator) EvaluatePolicyRule(attrs authorizer.Attributes) auditinternal.RequestAuditConfigWithLevel { +func (p *policyRuleEvaluator) EvaluatePolicyRule(attrs authorizer.Attributes) auditinternal.RequestAuditConfig { for _, rule := range p.Rules { if ruleMatches(&rule, attrs) { - return auditinternal.RequestAuditConfigWithLevel{ - Level: rule.Level, - RequestAuditConfig: auditinternal.RequestAuditConfig{ - OmitStages: rule.OmitStages, - OmitManagedFields: isOmitManagedFields(&rule, p.OmitManagedFields), - }, + return auditinternal.RequestAuditConfig{ + Level: rule.Level, + OmitStages: rule.OmitStages, + OmitManagedFields: isOmitManagedFields(&rule, p.OmitManagedFields), } } } - return auditinternal.RequestAuditConfigWithLevel{ - Level: DefaultAuditLevel, - RequestAuditConfig: auditinternal.RequestAuditConfig{ - OmitStages: p.OmitStages, - OmitManagedFields: p.OmitManagedFields, - }, + return auditinternal.RequestAuditConfig{ + Level: DefaultAuditLevel, + OmitStages: p.OmitStages, + OmitManagedFields: p.OmitManagedFields, } } @@ -235,11 +231,9 @@ type fakePolicyRuleEvaluator struct { stage []audit.Stage } -func (f *fakePolicyRuleEvaluator) EvaluatePolicyRule(_ authorizer.Attributes) auditinternal.RequestAuditConfigWithLevel { - return auditinternal.RequestAuditConfigWithLevel{ - Level: f.level, - RequestAuditConfig: auditinternal.RequestAuditConfig{ - OmitStages: f.stage, - }, +func (f *fakePolicyRuleEvaluator) EvaluatePolicyRule(_ authorizer.Attributes) auditinternal.RequestAuditConfig { + return auditinternal.RequestAuditConfig{ + Level: f.level, + OmitStages: f.stage, } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/OWNERS new file mode 100644 index 000000000000..7fb6b78f2b43 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/OWNERS @@ -0,0 +1,2 @@ +approvers: + - apelisse diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/OWNERS new file mode 100644 index 000000000000..49d71eba5142 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - alexzielenski + - jefftree diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go 
index d74e376c7dd2..01cc682efd0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go @@ -35,8 +35,7 @@ import ( // - Replies with 304 Not Modified, if If-None-Match header matches hash // // hash should be the value of calculateETag on object. If hash is empty, then -// -// the object is simply serialized without E-Tag functionality +// the object is simply serialized without E-Tag functionality func ServeHTTPWithETag( object runtime.Object, hash string, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go index b310c94ee2af..ccb628b443e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -133,10 +133,10 @@ func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRul return ac, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err) } - ls := policy.EvaluatePolicyRule(attribs) - audit.ObservePolicyLevel(ctx, ls.Level) - ac.RequestAuditConfig = ls.RequestAuditConfig - if ls.Level == auditinternal.LevelNone { + rac := policy.EvaluatePolicyRule(attribs) + audit.ObservePolicyLevel(ctx, rac.Level) + ac.RequestAuditConfig = rac + if rac.Level == auditinternal.LevelNone { // Don't audit. return ac, nil } @@ -145,7 +145,7 @@ func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRul if !ok { requestReceivedTimestamp = time.Now() } - ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, ls.Level, attribs) + ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, rac.Level, attribs) if err != nil { return nil, fmt.Errorf("failed to complete audit event from request: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index fba588283901..f7648d41cedd 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -70,7 +70,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime. return } - klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason) + klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason) audit.AddAuditAnnotations(ctx, decisionAnnotationKey, decisionForbid, reasonAnnotationKey, reason) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 1dc1fe4a5057..4803975a7354 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -109,14 +109,14 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. 
actingAsAttributes.Resource = "uids" default: - klog.V(4).InfoS("unknown impersonation request type", "Request", impersonationRequest) + klog.V(4).InfoS("unknown impersonation request type", "request", impersonationRequest) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) return } decision, reason, err := a.Authorize(ctx, actingAsAttributes) if err != nil || decision != authorizer.DecisionAllow { - klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason, "Error", err) + klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason, "err", err) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) return } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go index 34b80b44997c..1a5a7696663d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -34,7 +34,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storageversion" - openapiproto "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) // ConvertabilityChecker indicates what versions a GroupKind is available in. @@ -96,7 +96,7 @@ type APIGroupVersion struct { MinRequestTimeout time.Duration // OpenAPIModels exposes the OpenAPI models to each individual handler. - OpenAPIModels openapiproto.Models + OpenAPIModels *spec.Swagger // The limit on the request body size that would be accepted and decoded in a write request. // 0 means no limit. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go index 26d264fe833b..eb20d42ce821 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go @@ -70,7 +70,7 @@ func (admit *managedFieldsValidatingAdmissionController) Admit(ctx context.Conte return err } managedFieldsAfterAdmission := objectMeta.GetManagedFields() - if _, err := DecodeManagedFields(managedFieldsAfterAdmission); err != nil { + if err := ValidateManagedFields(managedFieldsAfterAdmission); err != nil { objectMeta.SetManagedFields(managedFieldsBeforeAdmission) warning.AddWarning(ctx, "", fmt.Sprintf(InvalidManagedFieldsAfterMutatingAdmissionWarningFormat, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index 6c3d2ce83262..668f7688261f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -18,247 +18,40 @@ package fieldmanager import ( "fmt" - "reflect" - "time" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" - "k8s.io/klog/v2" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" - "sigs.k8s.io/structured-merge-diff/v4/merge" ) -// 
DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates -// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. -// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. -const DefaultMaxUpdateManagers int = 10 - -// DefaultTrackOnCreateProbability defines the default probability that the field management of an object -// starts being tracked from the object's creation, instead of from the first time the object is applied to. -const DefaultTrackOnCreateProbability float32 = 1 - -var atMostEverySecond = internal.NewAtMostEvery(time.Second) - -// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. -type Managed interface { - // Fields gets the fieldpath.ManagedFields. - Fields() fieldpath.ManagedFields - - // Times gets the timestamps associated with each operation. - Times() map[string]*metav1.Time -} - -// Manager updates the managed fields and merges applied configurations. -type Manager interface { - // Update is used when the object has already been merged (non-apply - // use-case), and simply updates the managed fields in the output - // object. - // * `liveObj` is not mutated by this function - // * `newObj` may be mutated by this function - // Returns the new object with managedFields removed, and the object's new - // proposed managedFields separately. - Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) - - // Apply is used when server-side apply is called, as it merges the - // object and updates the managed fields. - // * `liveObj` is not mutated by this function - // * `newObj` may be mutated by this function - // Returns the new object with managedFields removed, and the object's new - // proposed managedFields separately. - Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) -} - -// FieldManager updates the managed fields and merge applied +// FieldManager updates the managed fields and merges applied // configurations. -type FieldManager struct { - fieldManager Manager - subresource string -} - -// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields -// on update and apply requests. -func NewFieldManager(f Manager, subresource string) *FieldManager { - return &FieldManager{fieldManager: f, subresource: subresource} -} +type FieldManager = internal.FieldManager // NewDefaultFieldManager creates a new FieldManager that merges apply requests // and update managed fields for other types of requests. 
func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { - f, err := NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) } - return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil } // NewDefaultCRDFieldManager creates a new FieldManager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) { - f, err := NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) } - return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil -} - -// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. -func newDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { - return NewFieldManager( - NewLastAppliedUpdater( - NewLastAppliedManager( - NewProbabilisticSkipNonAppliedManager( - NewCapManagersManager( - NewBuildManagerInfoManager( - NewManagedFieldsUpdater( - NewStripMetaManager(f), - ), kind.GroupVersion(), subresource, - ), DefaultMaxUpdateManagers, - ), objectCreater, kind, DefaultTrackOnCreateProbability, - ), typeConverter, objectConverter, kind.GroupVersion()), - ), subresource, - ) -} - -// DecodeManagedFields converts ManagedFields from the wire format (api format) -// to the format used by sigs.k8s.io/structured-merge-diff -func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (Managed, error) { - return internal.DecodeManagedFields(encodedManagedFields) -} - -func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { - liveAccessor, err := meta.Accessor(liveObj) - if err != nil { - return nil, err - } - - // We take the managedFields of the live object in case the request tries to - // manually set managedFields via a subresource. 
- if ignoreManagedFieldsFromRequestObject { - return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) - } - - // If the object doesn't have metadata, we should just return without trying to - // set the managedFields at all, so creates/updates/patches will work normally. - newAccessor, err := meta.Accessor(newObj) - if err != nil { - return nil, err - } - - if isResetManagedFields(newAccessor.GetManagedFields()) { - return internal.NewEmptyManaged(), nil - } - - // If the managed field is empty or we failed to decode it, - // let's try the live object. This is to prevent clients who - // don't understand managedFields from deleting it accidentally. - managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) - if err != nil || len(managed.Fields()) == 0 { - return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) - } - return managed, nil -} - -func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { - if err != nil { - return internal.NewEmptyManaged(), nil - } - return managed, nil + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil } -// Update is used when the object has already been merged (non-apply -// use-case), and simply updates the managed fields in the output -// object. -func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { - // First try to decode the managed fields provided in the update, - // This is necessary to allow directly updating managed fields. - isSubresource := f.subresource != "" - managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) - if err != nil { - return newObj, nil - } - - internal.RemoveObjectManagedFields(newObj) - - if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { - return nil, err - } - - if err = internal.EncodeObjectManagedFields(object, managed); err != nil { - return nil, fmt.Errorf("failed to encode managed fields: %v", err) - } - - return object, nil -} - -// UpdateNoErrors is the same as Update, but it will not return -// errors. If an error happens, the object is returned with -// managedFields cleared. -func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { - obj, err := f.Update(liveObj, newObj, manager) - if err != nil { - atMostEverySecond.Do(func() { - ns, name := "unknown", "unknown" - if accessor, err := meta.Accessor(newObj); err == nil { - ns = accessor.GetNamespace() - name = accessor.GetName() - } - - klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "VersionKind", - newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) - }) - // Explicitly remove managedFields on failure, so that - // we can't have garbage in it. - internal.RemoveObjectManagedFields(newObj) - return newObj - } - return obj -} - -// Returns true if the managedFields indicate that the user is trying to -// reset the managedFields, i.e. if the list is non-nil but empty, or if -// the list has one empty item. -func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { - if len(managedFields) == 0 { - return managedFields != nil - } - - if len(managedFields) == 1 { - return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) - } - - return false -} - -// Apply is used when server-side apply is called, as it merges the -// object and updates the managed fields. 
-func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { - // If the object doesn't have metadata, apply isn't allowed. - accessor, err := meta.Accessor(liveObj) - if err != nil { - return nil, fmt.Errorf("couldn't get accessor: %v", err) - } - - // Decode the managed fields in the live object, since it isn't allowed in the patch. - managed, err := DecodeManagedFields(accessor.GetManagedFields()) - if err != nil { - return nil, fmt.Errorf("failed to decode managed fields: %v", err) - } - - object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) - if err != nil { - if conflicts, ok := err.(merge.Conflicts); ok { - return nil, internal.NewConflictError(conflicts) - } - return nil, err - } - - if err = internal.EncodeObjectManagedFields(object, managed); err != nil { - return nil, fmt.Errorf("failed to encode managed fields: %v", err) - } - - return object, nil +func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error { + _, err := internal.DecodeManagedFields(encodedManagedFields) + return err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/buildmanagerinfo.go similarity index 94% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/buildmanagerinfo.go index 58b87eb38869..fa342ca1351f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/buildmanagerinfo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package fieldmanager +package internal import ( "fmt" @@ -22,7 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" ) type buildManagerInfoManager struct { @@ -71,5 +70,5 @@ func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation meta if managerInfo.Manager == "" { managerInfo.Manager = "unknown" } - return internal.BuildManagerIdentifier(&managerInfo) + return BuildManagerIdentifier(&managerInfo) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/capmanagers.go similarity index 96% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/capmanagers.go index c3184e24154b..8951932ba420 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/capmanagers.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package fieldmanager +package internal import ( "fmt" @@ -22,7 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -100,7 +99,7 @@ func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Mana // Create a new manager identifier for the versioned bucket entry. // The version for this manager comes from the version of the update being merged into the bucket. - bucket, err := internal.BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ Manager: f.oldUpdatesManagerName, Operation: metav1.ManagedFieldsOperationUpdate, APIVersion: version, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fieldmanager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fieldmanager.go new file mode 100644 index 000000000000..f3111d4bc723 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fieldmanager.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = NewAtMostEvery(time.Second) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager struct { + fieldManager Manager + subresource string +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. +func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. 
+func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, kind, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion()), + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. 
+func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. + managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastapplied.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastapplied.go new file mode 100644 index 000000000000..b00b6b8298aa --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +// +// This is a copy of the corev1 annotation since we don't want to depend on the whole package. +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" + +// SetLastApplied sets the last-applied annotation the given value in +// the object. +func SetLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[LastAppliedConfigAnnotation] = value + if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { + delete(annotations, LastAppliedConfigAnnotation) + } + accessor.SetAnnotations(annotations) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedmanager.go similarity index 97% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedmanager.go index 4b07d462a220..3f6cf88210ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedmanager.go @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package fieldmanager +package internal import ( "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -100,7 +99,7 @@ func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Obj if annotations == nil { return nil, fmt.Errorf("no last applied annotation") } - var lastApplied, ok = annotations[corev1.LastAppliedConfigAnnotation] + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] if !ok || lastApplied == "" { return nil, fmt.Errorf("no last applied annotation") } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedupdater.go similarity index 78% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedupdater.go index 7cd4eb1289b0..06e6c5d8ce5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/lastappliedupdater.go @@ -14,14 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package fieldmanager +package internal import ( "fmt" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" - apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) @@ -62,7 +60,7 @@ func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Manag if err != nil { return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) } - err = setLastApplied(liveObj, lastAppliedValue) + err = SetLastApplied(liveObj, lastAppliedValue) if err != nil { return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) } @@ -79,27 +77,10 @@ func hasLastApplied(obj runtime.Object) bool { if annotations == nil { return false } - lastApplied, ok := annotations[corev1.LastAppliedConfigAnnotation] + lastApplied, ok := annotations[LastAppliedConfigAnnotation] return ok && len(lastApplied) > 0 } -func setLastApplied(obj runtime.Object, value string) error { - accessor, err := meta.Accessor(obj) - if err != nil { - panic(fmt.Sprintf("couldn't get accessor: %v", err)) - } - var annotations = accessor.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - annotations[corev1.LastAppliedConfigAnnotation] = value - if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { - delete(annotations, corev1.LastAppliedConfigAnnotation) - } - accessor.SetAnnotations(annotations) - return nil -} - func buildLastApplied(obj runtime.Object) (string, error) { obj = obj.DeepCopyObject() @@ -110,7 +91,7 @@ func buildLastApplied(obj runtime.Object) (string, error) { // Remove the annotation from the object before encoding the object var annotations = accessor.GetAnnotations() - delete(annotations, corev1.LastAppliedConfigAnnotation) + delete(annotations, LastAppliedConfigAnnotation) accessor.SetAnnotations(annotations) lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfieldsupdater.go similarity index 95% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfieldsupdater.go index 412443a6c4e8..376eed6b207a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfieldsupdater.go @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package fieldmanager +package internal import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -77,7 +76,7 @@ func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} } else { object = liveObj.DeepCopyObject() - internal.RemoveObjectManagedFields(object) + RemoveObjectManagedFields(object) } return object, managed, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/manager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/manager.go new file mode 100644 index 000000000000..053936103d74 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. 
+ Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/skipnonapplied.go similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/skipnonapplied.go index a8c34ad65297..6b281ec1e575 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/skipnonapplied.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package fieldmanager +package internal import ( "fmt" diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/stripmeta.go similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/stripmeta.go index 1460d9c80217..9b61f3a6f352 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/stripmeta.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package fieldmanager +package internal import ( "fmt" diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/structuredmerge.go similarity index 97% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/structuredmerge.go index 213988e23c28..eb5598ac3bfa 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/structuredmerge.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package fieldmanager +package internal import ( "fmt" @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/merge" ) @@ -108,7 +107,7 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed if err != nil { return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) } - managed = internal.NewManaged(managedFields, managed.Times()) + managed = NewManaged(managedFields, managed.Times()) return newObj, managed, nil } @@ -151,7 +150,7 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed if err != nil { return nil, nil, err } - managed = internal.NewManaged(managedFields, managed.Times()) + managed = NewManaged(managedFields, managed.Times()) if newObjTyped == nil { return nil, managed, nil diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go new file mode 100644 index 000000000000..3ff7507b32c9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go @@ -0,0 +1,112 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/managedfields" + utilopenapi "k8s.io/apiserver/pkg/util/openapi" + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser *managedfields.GvkParser +} + +var _ TypeConverter = &typeConverter{} + +// NewTypeConverter builds a TypeConverter from a proto.Models. This +// will automatically find the proper version of the object, and the +// corresponding schema information. 
+func NewTypeConverter(openapiSpec *spec.Swagger, preserveUnknownFields bool) (TypeConverter, error) { + models, err := utilopenapi.ToProtoModels(openapiSpec) + if err != nil { + return nil, err + } + parser, err := managedfields.NewGVKParser(models, preserveUnknownFields) + if err != nil { + return nil, err + } + return &typeConverter{parser: parser}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser.Type(gvk) + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. +func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/versionconverter.go similarity index 87% rename from cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go rename to cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/versionconverter.go index 477e92f796ef..45855fa4ca2b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/versionconverter.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package fieldmanager +package internal import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -99,3 +101,23 @@ func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.A func (v *versionConverter) IsMissingVersionError(err error) bool { return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) } + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go index d9844990c29d..655bf91d1e13 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go @@ -60,7 +60,7 @@ func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion sch // 2. Replicas path of the main resource is transformed to the replicas path of // the scale subresource func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { - managed, err := DecodeManagedFields(h.parentEntries) + managed, err := internal.DecodeManagedFields(h.parentEntries) if err != nil { return nil, err } @@ -92,13 +92,13 @@ func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { // ToParent merges `scaleEntries` with the entries of the main resource and // transforms them accordingly func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { - decodedParentEntries, err := DecodeManagedFields(h.parentEntries) + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) if err != nil { return nil, err } parentFields := decodedParentEntries.Fields() - decodedScaleEntries, err := DecodeManagedFields(scaleEntries) + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go index fc40546f1015..0aa97f7e7904 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go @@ -17,114 +17,26 @@ limitations under the License. 
package fieldmanager import ( - "fmt" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/managedfields" - "k8s.io/kube-openapi/pkg/util/proto" - "sigs.k8s.io/structured-merge-diff/v4/typed" - "sigs.k8s.io/structured-merge-diff/v4/value" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/kube-openapi/pkg/validation/spec" ) // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. -type TypeConverter interface { - ObjectToTyped(runtime.Object) (*typed.TypedValue, error) - TypedToObject(*typed.TypedValue) (runtime.Object, error) -} +type TypeConverter = internal.TypeConverter -// DeducedTypeConverter is a TypeConverter for CRDs that don't have a -// schema. It does implement the same interface though (and create the -// same types of objects), so that everything can still work the same. -// CRDs are merged with all their fields being "atomic" (lists +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists // included). -// -// Note that this is not going to be sufficient for converting to/from -// CRDs that have a schema defined (we don't support that schema yet). -// TODO(jennybuckley): Use the schema provided by a CRD if it exists. -type DeducedTypeConverter struct{} - -var _ TypeConverter = DeducedTypeConverter{} - -// ObjectToTyped converts an object into a TypedValue with a "deduced type". -func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { - switch o := obj.(type) { - case *unstructured.Unstructured: - return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) - default: - return typed.DeducedParseableType.FromStructured(obj) - } -} - -// TypedToObject transforms the typed value into a runtime.Object. That -// is not specific to deduced type. -func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { - return valueToObject(value.AsValue()) +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() } -type typeConverter struct { - parser *managedfields.GvkParser -} - -var _ TypeConverter = &typeConverter{} - // NewTypeConverter builds a TypeConverter from a proto.Models. This // will automatically find the proper version of the object, and the // corresponding schema information. 
-func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) { - parser, err := managedfields.NewGVKParser(models, preserveUnknownFields) - if err != nil { - return nil, err - } - return &typeConverter{parser: parser}, nil -} - -func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { - gvk := obj.GetObjectKind().GroupVersionKind() - t := c.parser.Type(gvk) - if t == nil { - return nil, newNoCorrespondingTypeError(gvk) - } - switch o := obj.(type) { - case *unstructured.Unstructured: - return t.FromUnstructured(o.UnstructuredContent()) - default: - return t.FromStructured(obj) - } -} - -func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { - return valueToObject(value.AsValue()) -} - -func valueToObject(val value.Value) (runtime.Object, error) { - vu := val.Unstructured() - switch o := vu.(type) { - case map[string]interface{}: - return &unstructured.Unstructured{Object: o}, nil - default: - return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) - } -} - -type noCorrespondingTypeErr struct { - gvk schema.GroupVersionKind -} - -func newNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { - return &noCorrespondingTypeErr{gvk: gvk} -} - -func (k *noCorrespondingTypeErr) Error() string { - return fmt.Sprintf("no corresponding type for %v", k.gvk) -} - -func isNoCorrespondingTypeError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*noCorrespondingTypeErr) - return ok +func NewTypeConverter(openapiSpec *spec.Swagger, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index b0af449f09b6..2c4d7b95793f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -1080,6 +1080,14 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok { apiResource.Categories = categoriesProvider.Categories() } + if !isSubresource { + singularNameProvider, ok := storage.(rest.SingularNameProvider) + if !ok { + return nil, nil, fmt.Errorf("resource %s must implement SingularNameProvider", resource) + } + apiResource.SingularName = singularNameProvider.GetSingularName() + } + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion) apiResource.Group = gvk.Group diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 18340107d36a..4c2abfaedcef 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -321,6 +321,14 @@ var ( "UPDATE", "WATCH", "WATCHLIST") + + // These are the valid connect requests which we report in our metrics. 
+ validConnectRequests = utilsets.NewString( + "log", + "exec", + "portforward", + "attach", + "proxy") ) const ( @@ -427,7 +435,7 @@ func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) { } scope := CleanScope(requestInfo) - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo) resource := requestInfo.Resource subresource := requestInfo.Subresource group := requestInfo.APIGroup @@ -448,7 +456,7 @@ func RecordDroppedRequest(req *http.Request, requestInfo *request.RequestInfo, c // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo) if requestInfo.IsResourceRequest { requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests)).Inc() @@ -471,7 +479,7 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo) if requestInfo.IsResourceRequest { requestTerminationsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() @@ -493,7 +501,7 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo) if requestInfo.IsResourceRequest { g = longRunningRequestsGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) @@ -508,11 +516,15 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. 
func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, httpCode, respSize int, elapsed time.Duration) { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if !ok || requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } // We don't use verb from , as this may be propagated from // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), verb, req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), verb, req, requestInfo) dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() @@ -585,15 +597,16 @@ func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, c // NormalizedVerb returns normalized verb func NormalizedVerb(req *http.Request) string { verb := req.Method - if requestInfo, ok := request.RequestInfoFrom(req.Context()); ok { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if ok { // If we can find a requestInfo, we can get a scope, and then // we can convert GETs to LISTs when needed. scope := CleanScope(requestInfo) verb = CanonicalVerb(strings.ToUpper(verb), scope) } - // mark APPLY requests and WATCH requests correctly. - return CleanVerb(verb, req) + // mark APPLY requests, WATCH requests and CONNECT requests correctly. + return CleanVerb(verb, req, requestInfo) } // CleanScope returns the scope of the request. @@ -626,8 +639,8 @@ func CanonicalVerb(verb string, scope string) string { } // CleanVerb returns a normalized verb, so that it is easy to tell WATCH from -// LIST and APPLY from PATCH. -func CleanVerb(verb string, request *http.Request) string { +// LIST, APPLY from PATCH and CONNECT from others. +func CleanVerb(verb string, request *http.Request, requestInfo *request.RequestInfo) string { reportedVerb := verb if suggestedVerb := getVerbIfWatch(request); suggestedVerb == "WATCH" { reportedVerb = "WATCH" @@ -639,21 +652,22 @@ func CleanVerb(verb string, request *http.Request) string { if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) { reportedVerb = "APPLY" } + if requestInfo != nil && requestInfo.IsResourceRequest && len(requestInfo.Subresource) > 0 && validConnectRequests.Has(requestInfo.Subresource) { + reportedVerb = "CONNECT" + } return reportedVerb } // cleanVerb additionally ensures that unknown verbs don't clog up the metrics. -func cleanVerb(verb, suggestedVerb string, request *http.Request) string { +func cleanVerb(verb, suggestedVerb string, request *http.Request, requestInfo *request.RequestInfo) string { // CanonicalVerb (being an input for this function) doesn't handle correctly the // deprecated path pattern for watch of: // GET /api/{version}/watch/{resource} // We correct it manually based on the pass verb from the installer. 
- var reportedVerb string if suggestedVerb == "WATCH" || suggestedVerb == "WATCHLIST" { - reportedVerb = "WATCH" - } else { - reportedVerb = CleanVerb(verb, request) + return "WATCH" } + reportedVerb := CleanVerb(verb, request, requestInfo) if validRequestMethods.Has(reportedVerb) { return reportedVerb } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 2bc00a66e7b9..2558494bd9aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -211,7 +211,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er opts := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { // An error in parsing request will result in default to "list" and not setting "name" field. - klog.ErrorS(err, "Couldn't parse request", "Request", req.URL.Query()) + klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query()) // Reset opts to not rely on partial results from parsing. // However, if watch is set, let's report it. opts = metainternalversion.ListOptions{} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go index aa903587d816..624181e9ea10 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -220,7 +220,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS APIServerTracing: {Default: false, PreRelease: featuregate.Alpha}, - AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, + AdvancedAuditing: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 40bca49665f6..6e8d291a3d9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -110,6 +110,9 @@ type Store struct { // See qualifiedResourceFromContext for details. DefaultQualifiedResource schema.GroupResource + // SingularQualifiedResource is the singular name of the resource. + SingularQualifiedResource schema.GroupResource + // KeyRootFunc returns the root etcd key for this resource; should not // include trailing "/". This is used for operations that work on the // entire collection (listing and watching). 
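For readers following the new singular-name plumbing, here is a minimal sketch (not part of the patch) of how a registry would populate the new Store field; the example.k8s.io group, the widgets resource and the newFunc/newListFunc constructors are hypothetical:

package registry // hypothetical illustration only

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
)

// newWidgetStore shows only the fields relevant to singular-name discovery;
// a real Store also needs strategies, key funcs and CompleteWithOptions.
func newWidgetStore(newFunc, newListFunc func() runtime.Object) *genericregistry.Store {
	return &genericregistry.Store{
		NewFunc:     newFunc,
		NewListFunc: newListFunc,
		// plural name, reported in errors and discovery
		DefaultQualifiedResource: schema.GroupResource{Group: "example.k8s.io", Resource: "widgets"},
		// singular name, surfaced via GetSingularName; must share the plural's group
		SingularQualifiedResource: schema.GroupResource{Group: "example.k8s.io", Resource: "widget"},
	}
}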
@@ -229,6 +232,8 @@ var _ rest.StandardStorage = &Store{} var _ rest.TableConvertor = &Store{} var _ GenericStore = &Store{} +var _ rest.SingularNameProvider = &Store{} + const ( OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" resourceCountPollPeriodJitter = 1.2 @@ -1320,6 +1325,12 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { if e.DefaultQualifiedResource.Empty() { return fmt.Errorf("store %#v must have a non-empty qualified resource", e) } + if e.SingularQualifiedResource.Empty() { + return fmt.Errorf("store %#v must have a non-empty singular qualified resource", e) + } + if e.DefaultQualifiedResource.Group != e.SingularQualifiedResource.Group { + return fmt.Errorf("store for %#v, singular and plural qualified resource's group name's must match", e) + } if e.NewFunc == nil { return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String()) } @@ -1515,6 +1526,10 @@ func (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return e.ResetFieldsStrategy.GetResetFields() } +func (e *Store) GetSingularName() string { + return e.SingularQualifiedResource.Resource +} + // validateIndexers will check the prefix of indexers. func validateIndexers(indexers *cache.Indexers) error { if indexers == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go index 6330ea8f531f..b8d47ff4fe30 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -89,6 +89,12 @@ type CategoriesProvider interface { Categories() []string } +// SingularNameProvider returns singular name of resources. This is used by kubectl discovery to have singular +// name representation of resources. In case of shortcut conflicts(with CRD shortcuts) singular name should always map to this resource. +type SingularNameProvider interface { + GetSingularName() string +} + // GroupVersionKindProvider is used to specify a particular GroupVersionKind to discovery. This is used for polymorphic endpoints // which generally point to foreign versions. Scale refers to Scale.v1beta1.extensions for instance. // This trumps KindProvider since it is capable of providing the information required. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go index b572d3359107..86f8eca6d8e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go @@ -34,6 +34,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/google/uuid" + "golang.org/x/crypto/cryptobyte" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -126,6 +127,7 @@ type Config struct { EnableIndex bool EnableProfiling bool + DebugSocketPath string EnableDiscovery bool // Requires generic profiling enabled @@ -155,8 +157,10 @@ type Config struct { // BuildHandlerChainFunc allows you to build custom handler chains by decorating the apiHandler. BuildHandlerChainFunc func(apiHandler http.Handler, c *Config) (secure http.Handler) - // HandlerChainWaitGroup allows you to wait for all chain handlers exit after the server shutdown. 
- HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + // NonLongRunningRequestWaitGroup allows you to wait for all chain + // handlers associated with non long-running requests + // to complete while the server is shuting down. + NonLongRunningRequestWaitGroup *utilwaitgroup.SafeWaitGroup // DiscoveryAddresses is used to build the IPs pass to discovery. If nil, the ExternalAddress is // always reported DiscoveryAddresses discovery.Addresses @@ -342,31 +346,48 @@ func NewConfig(codecs serializer.CodecFactory) *Config { klog.Fatalf("error getting hostname for apiserver identity: %v", err) } - hash := sha256.Sum256([]byte(hostname)) - id = "kube-apiserver-" + strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash[:16])) + // Since the hash needs to be unique across each kube-apiserver and aggregated apiservers, + // the hash used for the identity should include both the hostname and the identity value. + // TODO: receive the identity value as a parameter once the apiserver identity lease controller + // post start hook is moved to generic apiserver. + b := cryptobyte.NewBuilder(nil) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(hostname)) + }) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte("kube-apiserver")) + }) + hashData, err := b.Bytes() + if err != nil { + klog.Fatalf("error building hash data for apiserver identity: %v", err) + } + + hash := sha256.Sum256(hashData) + id = "apiserver-" + strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash[:16])) } lifecycleSignals := newLifecycleSignals() return &Config{ - Serializer: codecs, - BuildHandlerChainFunc: DefaultBuildHandlerChain, - HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), - LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), - DisabledPostStartHooks: sets.NewString(), - PostStartHooks: map[string]PostStartHookConfigEntry{}, - HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), - ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), - LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), - EnableIndex: true, - EnableDiscovery: true, - EnableProfiling: true, - EnableMetrics: true, - MaxRequestsInFlight: 400, - MaxMutatingRequestsInFlight: 200, - RequestTimeout: time.Duration(60) * time.Second, - MinRequestTimeout: 1800, - LivezGracePeriod: time.Duration(0), - ShutdownDelayDuration: time.Duration(0), + Serializer: codecs, + BuildHandlerChainFunc: DefaultBuildHandlerChain, + NonLongRunningRequestWaitGroup: new(utilwaitgroup.SafeWaitGroup), + LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), + DisabledPostStartHooks: sets.NewString(), + PostStartHooks: map[string]PostStartHookConfigEntry{}, + HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + EnableIndex: true, + EnableDiscovery: true, + EnableProfiling: true, + DebugSocketPath: "", + EnableMetrics: true, + MaxRequestsInFlight: 400, + MaxMutatingRequestsInFlight: 200, + RequestTimeout: time.Duration(60) * time.Second, + MinRequestTimeout: 1800, + LivezGracePeriod: time.Duration(0), + ShutdownDelayDuration: time.Duration(0), // 1.5MB is the default client request size in bytes // the etcd server should accept. See // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. 
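The identity hash above length-prefixes each input before hashing so that different (hostname, identity) pairs cannot collide by simple concatenation. A self-contained sketch of that construction (buildAPIServerID and the node-1 hostname are illustrative, not part of the patch):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"

	"golang.org/x/crypto/cryptobyte"
)

// buildAPIServerID mirrors the hunk above: each field is written with a
// uint16 length prefix, the result is hashed with SHA-256, and the first
// 16 bytes are base32-encoded without padding.
func buildAPIServerID(hostname, identity string) (string, error) {
	b := cryptobyte.NewBuilder(nil)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes([]byte(hostname)) })
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes([]byte(identity)) })
	hashData, err := b.Bytes()
	if err != nil {
		return "", err
	}
	hash := sha256.Sum256(hashData)
	return "apiserver-" + strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash[:16])), nil
}

func main() {
	id, err := buildAPIServerID("node-1", "kube-apiserver") // hostname is a made-up example
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}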
@@ -631,20 +652,26 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G return c.BuildHandlerChainFunc(handler, c.Config) } + var debugSocket *routes.DebugSocket + if c.DebugSocketPath != "" { + debugSocket = routes.NewDebugSocket(c.DebugSocketPath) + } + apiServerHandler := NewAPIServerHandler(name, c.Serializer, handlerChainBuilder, delegationTarget.UnprotectedHandler()) s := &GenericAPIServer{ - discoveryAddresses: c.DiscoveryAddresses, - LoopbackClientConfig: c.LoopbackClientConfig, - legacyAPIGroupPrefixes: c.LegacyAPIGroupPrefixes, - admissionControl: c.AdmissionControl, - Serializer: c.Serializer, - AuditBackend: c.AuditBackend, - Authorizer: c.Authorization.Authorizer, - delegationTarget: delegationTarget, - EquivalentResourceRegistry: c.EquivalentResourceRegistry, - HandlerChainWaitGroup: c.HandlerChainWaitGroup, - Handler: apiServerHandler, + discoveryAddresses: c.DiscoveryAddresses, + LoopbackClientConfig: c.LoopbackClientConfig, + legacyAPIGroupPrefixes: c.LegacyAPIGroupPrefixes, + admissionControl: c.AdmissionControl, + Serializer: c.Serializer, + AuditBackend: c.AuditBackend, + Authorizer: c.Authorization.Authorizer, + delegationTarget: delegationTarget, + EquivalentResourceRegistry: c.EquivalentResourceRegistry, + NonLongRunningRequestWaitGroup: c.NonLongRunningRequestWaitGroup, + Handler: apiServerHandler, + UnprotectedDebugSocket: debugSocket, listedPathProvider: apiServerHandler, @@ -879,7 +906,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericapifilters.WithRequestDeadline(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator, c.LongRunningFunc, c.Serializer, c.RequestTimeout) - handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) + handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.NonLongRunningRequestWaitGroup) if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 { handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance) } @@ -914,6 +941,13 @@ func installAPI(s *GenericAPIServer, c *Config) { // so far, only logging related endpoints are considered valid to add for these debug flags. 
routes.DebugFlags{}.Install(s.Handler.NonGoRestfulMux, "v", routes.StringFlagPutHandler(logs.GlogSetter)) } + if s.UnprotectedDebugSocket != nil { + s.UnprotectedDebugSocket.InstallProfiling() + s.UnprotectedDebugSocket.InstallDebugFlag("v", routes.StringFlagPutHandler(logs.GlogSetter)) + if c.EnableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + } if c.EnableMetrics { if c.EnableProfiling { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index 661c5ad9a737..6d27633971e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -36,6 +36,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/apis/apiserver" egressmetrics "k8s.io/apiserver/pkg/server/egressselector/metrics" + compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/tracing" "k8s.io/klog/v2" client "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client" @@ -43,6 +44,10 @@ import ( var directDialer utilnet.DialFunc = http.DefaultTransport.(*http.Transport).DialContext +func init() { + client.Metrics.RegisterMetrics(compbasemetrics.NewKubeRegistry().Registerer()) +} + // EgressSelector is the map of network context type to context dialer, for network egress. type EgressSelector struct { egressToDialer map[EgressType]utilnet.DialFunc diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/cors.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/cors.go index 29c46e4c7935..ca078cb0520e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/cors.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/cors.go @@ -38,44 +38,76 @@ func WithCORS(handler http.Handler, allowedOriginPatterns []string, allowedMetho return handler } allowedOriginPatternsREs := allowedOriginRegexps(allowedOriginPatterns) + + // Set defaults for methods and headers if nothing was passed + if allowedMethods == nil { + allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE", "PATCH"} + } + allowMethodsResponseHeader := strings.Join(allowedMethods, ", ") + + if allowedHeaders == nil { + allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"} + } + allowHeadersResponseHeader := strings.Join(allowedHeaders, ", ") + + if exposedHeaders == nil { + exposedHeaders = []string{"Date"} + } + exposeHeadersResponseHeader := strings.Join(exposedHeaders, ", ") + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { origin := req.Header.Get("Origin") - if origin != "" { - allowed := false - for _, re := range allowedOriginPatternsREs { - if allowed = re.MatchString(origin); allowed { - break - } - } - if allowed { - w.Header().Set("Access-Control-Allow-Origin", origin) - // Set defaults for methods and headers if nothing was passed - if allowedMethods == nil { - allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE", "PATCH"} - } - if allowedHeaders == nil { - allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"} - } - if exposedHeaders == nil { - exposedHeaders = []string{"Date"} - } - 
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", ")) - w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", ")) - w.Header().Set("Access-Control-Expose-Headers", strings.Join(exposedHeaders, ", ")) - w.Header().Set("Access-Control-Allow-Credentials", allowCredentials) - - // Stop here if its a preflight OPTIONS request - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusNoContent) - return - } - } + if origin == "" { + handler.ServeHTTP(w, req) + return + } + if !isOriginAllowed(origin, allowedOriginPatternsREs) { + handler.ServeHTTP(w, req) + return } + + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Access-Control-Allow-Methods", allowMethodsResponseHeader) + w.Header().Set("Access-Control-Allow-Headers", allowHeadersResponseHeader) + w.Header().Set("Access-Control-Expose-Headers", exposeHeadersResponseHeader) + w.Header().Set("Access-Control-Allow-Credentials", allowCredentials) + + // Stop here if its a preflight OPTIONS request + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusNoContent) + return + } + // Dispatch to the next handler handler.ServeHTTP(w, req) }) } +// isOriginAllowed returns true if the given origin header in the +// request is allowed CORS. +// +// From https://www.rfc-editor.org/rfc/rfc6454#page-13 +// +// a) The origin header can contain host and/or port +// serialized-origin = scheme "://" host [ ":" port ] +// +// b) In some cases, a number of origins contribute to causing the user +// agents to issue an HTTP request. In those cases, the user agent MAY +// list all the origins in the Origin header field. For example, if the +// HTTP request was initially issued by one origin but then later +// redirected by another origin, the user agent MAY inform the server +// that two origins were involved in causing the user agent to issue the +// request +// origin-list = serialized-origin *( SP serialized-origin ) +func isOriginAllowed(originHeader string, allowedOriginPatternsREs []*regexp.Regexp) bool { + for _, re := range allowedOriginPatternsREs { + if re.MatchString(originHeader) { + return true + } + } + return false +} + func allowedOriginRegexps(allowedOrigins []string) []*regexp.Regexp { res, err := compileRegexps(allowedOrigins) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go index 70b32c76697d..4cab1f86d8b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go @@ -24,20 +24,34 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/kubernetes/scheme" ) +// RequestWaitGroup helps with the accounting of request(s) that are in +// flight: the caller is expected to invoke Add(1) before executing the +// request handler and then invoke Done() when the handler finishes. +// NOTE: implementations must ensure that it is thread-safe +// when invoked from multiple goroutines. +type RequestWaitGroup interface { + // Add adds delta, which may be negative, similar to sync.WaitGroup. + // If Add with a positive delta happens after Wait, it will return error, + // which prevent unsafe Add. 
+ Add(delta int) error + + // Done decrements the WaitGroup counter. + Done() +} + // WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown. -func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler { +func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg RequestWaitGroup) http.Handler { // NOTE: both WithWaitGroup and WithRetryAfter must use the same exact isRequestExemptFunc 'isRequestExemptFromRetryAfter, // otherwise SafeWaitGroup might wait indefinitely and will prevent the server from shutting down gracefully. return withWaitGroup(handler, longRunning, wg, isRequestExemptFromRetryAfter) } -func withWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup, isRequestExemptFn isRequestExemptFunc) http.Handler { +func withWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg RequestWaitGroup, isRequestExemptFn isRequestExemptFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() requestInfo, ok := apirequest.RequestInfoFrom(ctx) @@ -64,12 +78,7 @@ func withWaitGroup(handler http.Handler, longRunning apirequest.LongRunningReque // When apiserver is shutting down, signal clients to retry // There is a good chance the client hit a different server, so a tight retry is good for client responsiveness. - w.Header().Add("Retry-After", "1") - w.Header().Set("Content-Type", runtime.ContentTypeJSON) - w.Header().Set("X-Content-Type-Options", "nosniff") - statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() - w.WriteHeader(int(statusErr.Code)) - fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) + waitGroupWriteRetryAfterToResponse(w) return } @@ -77,3 +86,12 @@ func withWaitGroup(handler http.Handler, longRunning apirequest.LongRunningReque handler.ServeHTTP(w, req) }) } + +func waitGroupWriteRetryAfterToResponse(w http.ResponseWriter) { + w.Header().Add("Retry-After", "1") + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Header().Set("X-Content-Type-Options", "nosniff") + statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() + w.WriteHeader(int(statusErr.Code)) + fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go index d37fc49c681e..a981eae78a80 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -55,7 +55,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve return } http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) - klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "audit-ID", audit.GetAuditIDTruncated(req.Context())) + klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context())) }) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 2868187f0129..1a2554048bc2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apimachinery/pkg/version" @@ -48,7 +49,6 @@ import ( "k8s.io/apiserver/pkg/server/routes" "k8s.io/apiserver/pkg/storageversion" utilfeature "k8s.io/apiserver/pkg/util/feature" - utilopenapi "k8s.io/apiserver/pkg/util/openapi" restclient "k8s.io/client-go/rest" "k8s.io/klog/v2" openapibuilder2 "k8s.io/kube-openapi/pkg/builder" @@ -56,7 +56,6 @@ import ( "k8s.io/kube-openapi/pkg/handler" "k8s.io/kube-openapi/pkg/handler3" openapiutil "k8s.io/kube-openapi/pkg/util" - openapiproto "k8s.io/kube-openapi/pkg/util/proto" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/utils/clock" ) @@ -136,6 +135,10 @@ type GenericAPIServer struct { // Handler holds the handlers being used by this API server Handler *APIServerHandler + // UnprotectedDebugSocket is used to serve pprof information in a unix-domain socket. This socket is + // not protected by authentication/authorization. + UnprotectedDebugSocket *routes.DebugSocket + // listedPathProvider is a lister which provides the set of paths to show at / listedPathProvider routes.ListedPathProvider @@ -214,8 +217,10 @@ type GenericAPIServer struct { // delegationTarget is the next delegate in the chain. This is never nil. delegationTarget DelegationTarget - // HandlerChainWaitGroup allows you to wait for all chain handlers finish after the server shutdown. - HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + // NonLongRunningRequestWaitGroup allows you to wait for all chain + // handlers associated with non long-running requests + // to complete while the server is shuting down. + NonLongRunningRequestWaitGroup *utilwaitgroup.SafeWaitGroup // ShutdownDelayDuration allows to block shutdown for some time, e.g. until endpoints pointing to this API server // have converged on all node. During this time, the API server keeps serving, /healthz will return 200, @@ -449,7 +454,7 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { // | | | | | // | | ---------------| | // | | | | -// | | (HandlerChainWaitGroup::Wait) | +// | | (NonLongRunningRequestWaitGroup::Wait) | // | | | | // | | InFlightRequestsDrained (drainedCh) | // | | | | @@ -467,6 +472,14 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { // Clean up resources on shutdown. 
defer s.Destroy() + // If UDS profiling is enabled, start a local http server listening on that socket + if s.UnprotectedDebugSocket != nil { + go func() { + defer utilruntime.HandleCrash() + klog.Error(s.UnprotectedDebugSocket.Run(stopCh)) + }() + } + // spawn a new goroutine for closing the MuxAndDiscoveryComplete signal // registration happens during construction of the generic api server // the last server in the chain aggregates signals from the previous instances @@ -509,7 +522,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { // net/http waits for 1s for the peer to respond to a GO_AWAY frame, so // we should wait for a minimum of 2s shutdownTimeout = 2 * time.Second - klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "ShutdownTimeout", shutdownTimeout) + klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "shutdownTimeout", shutdownTimeout) } notAcceptingNewRequestCh := s.lifecycleSignals.NotAcceptingNewRequest @@ -571,7 +584,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { <-notAcceptingNewRequestCh.Signaled() // Wait for all requests to finish, which are bounded by the RequestTimeout variable. - // once HandlerChainWaitGroup.Wait is invoked, the apiserver is + // once NonLongRunningRequestWaitGroup.Wait is invoked, the apiserver is // expected to reject any incoming request with a {503, Retry-After} // response via the WithWaitGroup filter. On the contrary, we observe // that incoming request(s) get a 'connection refused' error, this is @@ -583,7 +596,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { // 'Server.Shutdown' will be invoked only after in-flight requests // have been drained. // TODO: can we consolidate these two modes of graceful termination? 
- s.HandlerChainWaitGroup.Wait() + s.NonLongRunningRequestWaitGroup.Wait() }() klog.V(1).Info("[graceful-termination] waiting for shutdown to be initiated") @@ -653,7 +666,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdow } // installAPIResources is a private method for installing the REST storage backing each api groupversionresource -func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels openapiproto.Models) error { +func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels *spec.Swagger) error { var resourceInfos []*storageversion.ResourceInfo for _, groupVersion := range apiGroupInfo.PrioritizedVersions { if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { @@ -868,7 +881,7 @@ func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec } // getOpenAPIModels is a private method for getting the OpenAPI models -func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (openapiproto.Models, error) { +func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (*spec.Swagger, error) { if s.openAPIConfig == nil { return nil, nil } @@ -890,7 +903,7 @@ func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...* for _, apiGroupInfo := range apiGroupInfos { apiGroupInfo.StaticOpenAPISpec = openAPISpec } - return utilopenapi.ToProtoModels(openAPISpec) + return openAPISpec, nil } // getResourceNamesForGroup is a private method for getting the canonical names for each resource to build in an api group diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/authorization.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/authorization.go index c31ce47f8ca3..9b2dcb3fff7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/authorization.go @@ -38,9 +38,8 @@ import ( // DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to // the root kube API server. // WARNING: never assume that every authenticated incoming request already does authorization. -// -// The aggregator in the kube API server does this today, but this behaviour is not -// guaranteed in the future. +// The aggregator in the kube API server does this today, but this behaviour is not +// guaranteed in the future. type DelegatingAuthorizationOptions struct { // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens. 
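The graceful-termination changes above revolve around the new filters.RequestWaitGroup contract: Add(1) before serving a non long-running request, Done() when the handler returns, and a 503 with Retry-After once Add starts failing during shutdown. A minimal sketch of a wrapper using that contract (withSimpleWaitGroup is illustrative and much simpler than the real withWaitGroup filter):

package main

import (
	"net/http"

	"k8s.io/apiserver/pkg/server/filters"
)

// withSimpleWaitGroup tracks in-flight requests so a server can wait for them
// to drain; it deliberately skips the long-running and exemption checks that
// the real filter performs.
func withSimpleWaitGroup(handler http.Handler, wg filters.RequestWaitGroup) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if err := wg.Add(1); err != nil {
			// Add fails after Wait has been called, i.e. the server is draining.
			w.Header().Set("Retry-After", "1")
			http.Error(w, "apiserver is shutting down", http.StatusServiceUnavailable)
			return
		}
		defer wg.Done()
		handler.ServeHTTP(w, req)
	})
}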
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index c95717c532ea..8e53903fdf01 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -49,6 +49,8 @@ import ( "k8s.io/apiserver/pkg/storage/value/encrypt/identity" "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" + kmsservice "k8s.io/kms/service" ) const ( @@ -57,11 +59,18 @@ const ( secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" kmsTransformerPrefixV1 = "k8s:enc:kms:v1:" kmsTransformerPrefixV2 = "k8s:enc:kms:v2:" + kmsPluginHealthzInterval = 1 * time.Minute kmsPluginHealthzNegativeTTL = 3 * time.Second kmsPluginHealthzPositiveTTL = 20 * time.Second kmsAPIVersionV1 = "v1" kmsAPIVersionV2 = "v2" - kmsReloadHealthCheckName = "kms-providers" + // this name is used for two different healthz endpoints: + // - when one or more KMS v2 plugins are in use and no KMS v1 plugins are in use + // in this case, all v2 plugins are probed via this single endpoint + // - when automatic reload of encryption config is enabled + // in this case, all KMS plugins are probed via this single endpoint + // the endpoint is present even if there are no KMS plugins configured (it is a no-op then) + kmsReloadHealthCheckName = "kms-providers" ) type kmsPluginHealthzResponse struct { @@ -78,9 +87,10 @@ type kmsPluginProbe struct { } type kmsv2PluginProbe struct { + keyID atomic.Pointer[string] name string ttl time.Duration - service envelopekmsv2.Service + service kmsservice.Service lastResponse *kmsPluginHealthzResponse l *sync.Mutex } @@ -133,15 +143,16 @@ type EncryptionConfiguration struct { } // LoadEncryptionConfig parses and validates the encryption config specified by filepath. -// It may launch multiple go routines whose lifecycle is controlled by stopCh. +// It may launch multiple go routines whose lifecycle is controlled by ctx. +// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. // If reload is true, or KMS v2 plugins are used with no KMS v1 plugins, the returned slice of health checkers will always be of length 1. 
-func LoadEncryptionConfig(filepath string, reload bool, stopCh <-chan struct{}) (*EncryptionConfiguration, error) { +func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*EncryptionConfiguration, error) { config, contentHash, err := loadConfig(filepath, reload) if err != nil { return nil, fmt.Errorf("error while parsing file: %w", err) } - transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(config, stopCh) + transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(ctx, config) if err != nil { return nil, fmt.Errorf("error while building transformers: %w", err) } @@ -160,12 +171,15 @@ func LoadEncryptionConfig(filepath string, reload bool, stopCh <-chan struct{}) HealthChecks: kmsHealthChecks, EncryptionFileContentHash: contentHash, KMSCloseGracePeriod: 2 * kmsUsed.kmsTimeoutSum, - }, err + }, nil } -func getTransformerOverridesAndKMSPluginHealthzCheckers(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, *kmsState, error) { +// getTransformerOverridesAndKMSPluginHealthzCheckers creates the set of transformers and KMS healthz checks based on the given config. +// It may launch multiple go routines whose lifecycle is controlled by ctx. +// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. +func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, *kmsState, error) { var kmsHealthChecks []healthz.HealthChecker - transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(config, stopCh) + transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config) if err != nil { return nil, nil, nil, err } @@ -181,7 +195,10 @@ type healthChecker interface { toHealthzCheck(idx int) healthz.HealthChecker } -func getTransformerOverridesAndKMSPluginProbes(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthChecker, *kmsState, error) { +// getTransformerOverridesAndKMSPluginProbes creates the set of transformers and KMS probes based on the given config. +// It may launch multiple go routines whose lifecycle is controlled by ctx. +// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
+func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthChecker, *kmsState, error) { resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{} var probes []healthChecker var kmsUsed kmsState @@ -190,14 +207,11 @@ func getTransformerOverridesAndKMSPluginProbes(config *apiserverconfig.Encryptio for _, resourceConfig := range config.Resources { resourceConfig := resourceConfig - transformers, p, used, err := prefixTransformersAndProbes(resourceConfig, stopCh) + transformers, p, used, err := prefixTransformersAndProbes(ctx, resourceConfig) if err != nil { return nil, nil, nil, err } - kmsUsed.v1Used = kmsUsed.v1Used || used.v1Used - kmsUsed.v2Used = kmsUsed.v2Used || used.v2Used - - kmsUsed.kmsTimeoutSum += used.kmsTimeoutSum + kmsUsed.accumulate(used) // For each resource, create a list of providers to use for _, resource := range resourceConfig.Resources { @@ -262,6 +276,10 @@ func (h *kmsv2PluginProbe) check(ctx context.Context) error { h.ttl = kmsPluginHealthzNegativeTTL return fmt.Errorf("failed to perform status section of the healthz check for KMS Provider %s, error: %w", h.name, err) } + // we coast on the last valid key ID that we have observed + if err := envelopekmsv2.ValidateKeyID(p.KeyID); err == nil { + h.keyID.Store(&p.KeyID) + } if err := isKMSv2ProviderHealthy(h.name, p); err != nil { h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} @@ -274,8 +292,17 @@ func (h *kmsv2PluginProbe) check(ctx context.Context) error { return nil } +// getCurrentKeyID returns the latest keyID from the last Status() call or err if keyID is empty +func (h *kmsv2PluginProbe) getCurrentKeyID(ctx context.Context) (string, error) { + keyID := *h.keyID.Load() + if len(keyID) == 0 { + return "", fmt.Errorf("got unexpected empty keyID") + } + return keyID, nil +} + // isKMSv2ProviderHealthy checks if the KMSv2-Plugin is healthy. -func isKMSv2ProviderHealthy(name string, response *envelopekmsv2.StatusResponse) error { +func isKMSv2ProviderHealthy(name string, response *kmsservice.StatusResponse) error { var errs []error if response.Healthz != "ok" { errs = append(errs, fmt.Errorf("got unexpected healthz status: %s", response.Healthz)) @@ -283,7 +310,7 @@ func isKMSv2ProviderHealthy(name string, response *envelopekmsv2.StatusResponse) if response.Version != envelopekmsv2.KMSAPIVersion { errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersion, response.Version)) } - if len(response.KeyID) == 0 { + if err := envelopekmsv2.ValidateKeyID(response.KeyID); err != nil { errs = append(errs, fmt.Errorf("expected KMSv2 KeyID to be set, got %s", response.KeyID)) } @@ -326,7 +353,10 @@ func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfig return config, computeEncryptionConfigHash(data), validation.ValidateEncryptionConfiguration(config, reload).ToAggregate() } -func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, stopCh <-chan struct{}) ([]value.PrefixTransformer, []healthChecker, *kmsState, error) { +// prefixTransformersAndProbes creates the set of transformers and KMS probes based on the given resource config. +// It may launch multiple go routines whose lifecycle is controlled by ctx. +// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
+func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, []healthChecker, *kmsState, error) { var transformers []value.PrefixTransformer var probes []healthChecker var kmsUsed kmsState @@ -351,14 +381,10 @@ func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, s transformer, transformerErr = secretboxPrefixTransformer(provider.Secretbox) case provider.KMS != nil: - transformer, probe, used, transformerErr = kmsPrefixTransformer(provider.KMS, stopCh) + transformer, probe, used, transformerErr = kmsPrefixTransformer(ctx, provider.KMS) if transformerErr == nil { probes = append(probes, probe) - kmsUsed.v1Used = kmsUsed.v1Used || used.v1Used - kmsUsed.v2Used = kmsUsed.v2Used || used.v2Used - - // calculate the maximum timeout for all KMS providers - kmsUsed.kmsTimeoutSum += used.kmsTimeoutSum + kmsUsed.accumulate(used) } case provider.Identity != nil: @@ -497,10 +523,20 @@ type kmsState struct { kmsTimeoutSum time.Duration } -func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-chan struct{}) (value.PrefixTransformer, healthChecker, *kmsState, error) { - // we ignore the cancel func because this context should only be canceled when stopCh is closed - ctx, _ := wait.ContextForChannel(stopCh) +// accumulate computes the KMS state by: +// - determining which KMS plugin versions are in use +// - calculating kmsTimeoutSum which is used as transformTracker.kmsCloseGracePeriod +// DynamicTransformers.Set waits for this period before closing old transformers after a config reload +func (s *kmsState) accumulate(other *kmsState) { + s.v1Used = s.v1Used || other.v1Used + s.v2Used = s.v2Used || other.v2Used + s.kmsTimeoutSum += other.kmsTimeoutSum +} +// kmsPrefixTransformer creates a KMS transformer and probe based on the given KMS config. +// It may launch multiple go routines whose lifecycle is controlled by ctx. +// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
+func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (value.PrefixTransformer, healthChecker, *kmsState, error) { kmsName := config.Name switch config.APIVersion { case kmsAPIVersionV1: @@ -542,10 +578,27 @@ func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-cha l: &sync.Mutex{}, lastResponse: &kmsPluginHealthzResponse{}, } + // initialize keyID so that Load always works + keyID := "" + probe.keyID.Store(&keyID) + + // prime keyID by running the check inline once (this prevents unit tests from flaking) + // ignore the error here since we want to support the plugin starting up async with the API server + _ = probe.check(ctx) + // make sure that the plugin's key ID is reasonably up-to-date + go wait.PollUntilWithContext( + ctx, + kmsPluginHealthzInterval, + func(ctx context.Context) (bool, error) { + if err := probe.check(ctx); err != nil { + klog.V(2).ErrorS(err, "kms plugin failed health check probe", "name", kmsName) + } + return false, nil + }) // using AES-GCM by default for encrypting data with KMSv2 transformer := value.PrefixTransformer{ - Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), aestransformer.NewGCMTransformer), + Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, probe.getCurrentKeyID, int(*config.CacheSize), aestransformer.NewGCMTransformer), Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"), } @@ -606,6 +659,7 @@ func computeEncryptionConfigHash(data []byte) string { return fmt.Sprintf("%x", sha256.Sum256(data)) } +var _ ResourceTransformers = &DynamicTransformers{} var _ healthz.HealthChecker = &DynamicTransformers{} // DynamicTransformers holds transformers that may be dynamically updated via a single external actor, likely a controller. 
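The kmsv2 probe above caches the most recently validated key ID in an atomic.Pointer, primed with an empty string so Load never returns nil, so that getCurrentKeyID can read it without taking the probe's mutex; an empty value means no successful Status call has been observed yet. An isolated sketch of that pattern (cachedKeyID, observe and the key-1 value are illustrative stand-ins):

package main

import (
	"fmt"
	"sync/atomic"
)

// cachedKeyID mimics the probe's keyID handling: prime with "", overwrite only
// with values that pass validation, and report an error while still empty.
type cachedKeyID struct {
	keyID atomic.Pointer[string]
}

func newCachedKeyID() *cachedKeyID {
	c := &cachedKeyID{}
	empty := ""
	c.keyID.Store(&empty) // initialize so Load always works
	return c
}

func (c *cachedKeyID) observe(id string) {
	if id != "" { // stand-in for envelopekmsv2.ValidateKeyID
		c.keyID.Store(&id)
	}
}

func (c *cachedKeyID) current() (string, error) {
	if id := *c.keyID.Load(); id != "" {
		return id, nil
	}
	return "", fmt.Errorf("got unexpected empty keyID")
}

func main() {
	c := newCachedKeyID()
	if _, err := c.current(); err != nil {
		fmt.Println(err) // no valid Status response seen yet
	}
	c.observe("key-1")
	id, _ := c.current()
	fmt.Println(id)
}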
@@ -704,25 +758,23 @@ func (r *resourceTransformer) TransformToStorage(ctx context.Context, data []byt } func (r *resourceTransformer) transformer() value.Transformer { - transformer := r.transformTracker.Load().(*transformTracker).transformerOverrides[r.resource] - if transformer == nil { - return identity.NewEncryptCheckTransformer() - } - return transformer + return transformerFromOverrides(r.transformTracker.Load().(*transformTracker).transformerOverrides, r.resource) } type ResourceTransformers interface { TransformerForResource(resource schema.GroupResource) value.Transformer } -var _ ResourceTransformers = &DynamicTransformers{} var _ ResourceTransformers = &StaticTransformers{} type StaticTransformers map[schema.GroupResource]value.Transformer -// StaticTransformers func (s StaticTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer { - transformer := s[resource] + return transformerFromOverrides(s, resource) +} + +func transformerFromOverrides(transformerOverrides map[schema.GroupResource]value.Transformer, resource schema.GroupResource) value.Transformer { + transformer := transformerOverrides[resource] if transformer == nil { return identity.NewEncryptCheckTransformer() } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go index 35fc1dea0df2..b8c66826bf50 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go @@ -49,27 +49,22 @@ type DynamicKMSEncryptionConfigContent struct { // dynamicTransformers updates the transformers when encryption config file changes. dynamicTransformers *encryptionconfig.DynamicTransformers - - // stopCh used here is a lifecycle signal of genericapiserver already drained while shutting down. - stopCh <-chan struct{} } -// NewDynamicKMSEncryptionConfiguration returns controller that dynamically reacts to changes in encryption config file. -func NewDynamicKMSEncryptionConfiguration( +// NewDynamicEncryptionConfiguration returns controller that dynamically reacts to changes in encryption config file. 
+func NewDynamicEncryptionConfiguration( name, filePath string, dynamicTransformers *encryptionconfig.DynamicTransformers, configContentHash string, - stopCh <-chan struct{}, ) *DynamicKMSEncryptionConfigContent { encryptionConfig := &DynamicKMSEncryptionConfigContent{ name: name, filePath: filePath, lastLoadedEncryptionConfigHash: configContentHash, dynamicTransformers: dynamicTransformers, - stopCh: stopCh, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-hot-reload", name)), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), } - encryptionConfig.queue.Add(workqueueKey) + encryptionConfig.queue.Add(workqueueKey) // to avoid missing any file changes that occur in between the initial load and Run return encryptionConfig } @@ -83,21 +78,21 @@ func (d *DynamicKMSEncryptionConfigContent) Run(ctx context.Context) { defer klog.InfoS("Shutting down controller", "name", d.name) // start worker for processing content - go wait.Until(d.runWorker, time.Second, ctx.Done()) + go wait.UntilWithContext(ctx, d.runWorker, time.Second) // start the loop that watches the encryption config file until stopCh is closed. - go wait.Until(func() { - if err := d.watchEncryptionConfigFile(ctx.Done()); err != nil { + go wait.UntilWithContext(ctx, func(ctx context.Context) { + if err := d.watchEncryptionConfigFile(ctx); err != nil { // if there is an error while setting up or handling the watches, this will ensure that we will process the config file. defer d.queue.Add(workqueueKey) klog.ErrorS(err, "Failed to watch encryption config file, will retry later") } - }, time.Second, ctx.Done()) + }, time.Second) <-ctx.Done() } -func (d *DynamicKMSEncryptionConfigContent) watchEncryptionConfigFile(stopCh <-chan struct{}) error { +func (d *DynamicKMSEncryptionConfigContent) watchEncryptionConfigFile(ctx context.Context) error { watcher, err := fsnotify.NewWatcher() if err != nil { return fmt.Errorf("error creating fsnotify watcher: %w", err) @@ -116,7 +111,7 @@ func (d *DynamicKMSEncryptionConfigContent) watchEncryptionConfigFile(stopCh <-c } case err := <-watcher.Errors: return fmt.Errorf("received fsnotify error: %w", err) - case <-stopCh: + case <-ctx.Done(): return nil } } @@ -142,13 +137,13 @@ func (d *DynamicKMSEncryptionConfigContent) handleWatchEvent(event fsnotify.Even } // runWorker to process file content -func (d *DynamicKMSEncryptionConfigContent) runWorker() { - for d.processNextWorkItem() { +func (d *DynamicKMSEncryptionConfigContent) runWorker(ctx context.Context) { + for d.processNextWorkItem(ctx) { } } // processNextWorkItem processes file content when there is a message in the queue. -func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem() bool { +func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool { // key here is dummy item in the queue to trigger file content processing. key, quit := d.queue.Get() if quit { @@ -163,12 +158,15 @@ func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem() bool { configChanged bool ) - // get context to close the new transformers. 
- ctx, closeTransformers := wait.ContextForChannel(d.stopCh) + // get context to close the new transformers (on error cases and on the next reload) + // serverCtx is attached to the API server's lifecycle so we will always close transformers on shut down + ctx, closeTransformers := context.WithCancel(serverCtx) defer func() { // TODO: increment success metric when updatedEffectiveConfig=true + // TODO can work queue metrics help here? + if !updatedEffectiveConfig { // avoid leaking if we're not using the newly constructed transformers (due to an error or them not being changed) closeTransformers() @@ -222,7 +220,7 @@ func (d *DynamicKMSEncryptionConfigContent) processEncryptionConfig(ctx context. err error, ) { // this code path will only execute if reload=true. So passing true explicitly. - encryptionConfiguration, err = encryptionconfig.LoadEncryptionConfig(d.filePath, true, ctx.Done()) + encryptionConfiguration, err = encryptionconfig.LoadEncryptionConfig(ctx, d.filePath, true) if err != nil { return nil, false, err } @@ -247,7 +245,12 @@ func (d *DynamicKMSEncryptionConfigContent) validateNewTransformersHealth( kmsPluginCloseGracePeriod = 10 * time.Second } - pollErr := wait.PollImmediate(100*time.Millisecond, kmsPluginCloseGracePeriod, func() (bool, error) { + // really make sure that the immediate check does not hang + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, kmsPluginCloseGracePeriod) + defer cancel() + + pollErr := wait.PollImmediateWithContext(ctx, 100*time.Millisecond, kmsPluginCloseGracePeriod, func(ctx context.Context) (bool, error) { // create a fake http get request to health check endpoint req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("/healthz/%s", kmsPluginHealthzCheck.Name()), nil) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index a570efc611e8..0741e2ba660c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -17,6 +17,7 @@ limitations under the License. 
package options import ( + "context" "fmt" "net/http" "strconv" @@ -33,7 +34,7 @@ import ( "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/options/encryptionconfig" - kmsconfigcontroller "k8s.io/apiserver/pkg/server/options/encryptionconfig/controller" + encryptionconfigcontroller "k8s.io/apiserver/pkg/server/options/encryptionconfig/controller" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" @@ -228,10 +229,10 @@ func (s *EtcdOptions) Complete( } if len(s.EncryptionProviderConfigFilepath) != 0 { - ctxTransformers, closeTransformers := wait.ContextForChannel(stopCh) - ctxServer, _ := wait.ContextForChannel(stopCh) // explicitly ignore cancel here because we do not own the server's lifecycle + ctxServer := wait.ContextForChannel(stopCh) + ctxTransformers, closeTransformers := context.WithCancel(ctxServer) - encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload, ctxTransformers.Done()) + encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload) if err != nil { // in case of error, we want to close partially initialized (if any) transformers closeTransformers() @@ -249,23 +250,19 @@ func (s *EtcdOptions) Complete( dynamicTransformers := encryptionconfig.NewDynamicTransformers(encryptionConfiguration.Transformers, encryptionConfiguration.HealthChecks[0], closeTransformers, encryptionConfiguration.KMSCloseGracePeriod) - s.resourceTransformers = dynamicTransformers - s.kmsPluginHealthzChecks = []healthz.HealthChecker{dynamicTransformers} - // add post start hook to start hot reload controller // adding this hook here will ensure that it gets configured exactly once err = addPostStartHook( "start-encryption-provider-config-automatic-reload", - func(hookContext server.PostStartHookContext) error { - kmsConfigController := kmsconfigcontroller.NewDynamicKMSEncryptionConfiguration( - "kms-encryption-config", + func(_ server.PostStartHookContext) error { + dynamicEncryptionConfigController := encryptionconfigcontroller.NewDynamicEncryptionConfiguration( + "encryption-provider-config-automatic-reload-controller", s.EncryptionProviderConfigFilepath, dynamicTransformers, encryptionConfiguration.EncryptionFileContentHash, - ctxServer.Done(), ) - go kmsConfigController.Run(ctxServer) + go dynamicEncryptionConfigController.Run(ctxServer) return nil }, @@ -275,6 +272,9 @@ func (s *EtcdOptions) Complete( closeTransformers() return fmt.Errorf("failed to add post start hook for kms encryption config hot reload controller: %w", err) } + + s.resourceTransformers = dynamicTransformers + s.kmsPluginHealthzChecks = []healthz.HealthChecker{dynamicTransformers} } else { s.resourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) s.kmsPluginHealthzChecks = encryptionConfiguration.HealthChecks diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/feature.go index e8a624184218..e87109bbd5a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/feature.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -25,6 +25,7 @@ import ( type FeatureOptions struct { EnableProfiling 
bool + DebugSocketPath string EnableContentionProfiling bool } @@ -33,6 +34,7 @@ func NewFeatureOptions() *FeatureOptions { return &FeatureOptions{ EnableProfiling: defaults.EnableProfiling, + DebugSocketPath: defaults.DebugSocketPath, EnableContentionProfiling: defaults.EnableContentionProfiling, } } @@ -46,6 +48,8 @@ func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { "Enable profiling via web interface host:port/debug/pprof/") fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", o.EnableContentionProfiling, "Enable lock contention profiling, if profiling is enabled") + fs.StringVar(&o.DebugSocketPath, "debug-socket-path", o.DebugSocketPath, + "Use an unprotected (no authn/authz) unix-domain socket for profiling with the given path") } func (o *FeatureOptions) ApplyTo(c *server.Config) error { @@ -54,6 +58,7 @@ func (o *FeatureOptions) ApplyTo(c *server.Config) error { } c.EnableProfiling = o.EnableProfiling + c.DebugSocketPath = o.DebugSocketPath c.EnableContentionProfiling = o.EnableContentionProfiling return nil diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index 09668646ae8c..44a67345c2a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -22,7 +22,6 @@ import ( "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/server" @@ -205,11 +204,6 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost, "The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).") - deprecatedMasterServiceNamespace := metav1.NamespaceDefault - fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+ - "DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.") - fs.MarkDeprecated("master-service-namespace", "This flag will be removed in v1.27") - fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+ "This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit "+ "(which must be positive) if --enable-priority-and-fairness is true. "+ diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/debugsocket.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/debugsocket.go new file mode 100644 index 000000000000..e7297b35f33f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/debugsocket.go @@ -0,0 +1,82 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package routes + +import ( + "fmt" + "net" + "net/http" + "net/http/pprof" + "os" + "path" +) + +// DebugSocket installs profiling and debugflag as a Unix-Domain socket. +type DebugSocket struct { + path string + mux *http.ServeMux +} + +// NewDebugSocket creates a new DebugSocket for the given path. +func NewDebugSocket(path string) *DebugSocket { + return &DebugSocket{ + path: path, + mux: http.NewServeMux(), + } +} + +// InstallProfiling installs profiling endpoints in the socket. +func (s *DebugSocket) InstallProfiling() { + s.mux.HandleFunc("/debug/pprof", redirectTo("/debug/pprof/")) + s.mux.HandleFunc("/debug/pprof/", pprof.Index) + s.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + s.mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + s.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + s.mux.HandleFunc("/debug/pprof/trace", pprof.Trace) +} + +// InstallDebugFlag installs debug flag endpoints in the socket. +func (s *DebugSocket) InstallDebugFlag(flag string, handler func(http.ResponseWriter, *http.Request)) { + f := DebugFlags{} + s.mux.HandleFunc("/debug/flags", f.Index) + s.mux.HandleFunc("/debug/flags/", f.Index) + + url := path.Join("/debug/flags", flag) + s.mux.HandleFunc(url, handler) + + f.addFlag(flag) +} + +// Run starts the server and waits for stopCh to be closed to close the server. +func (s *DebugSocket) Run(stopCh <-chan struct{}) error { + if err := os.Remove(s.path); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove (%v): %v", s.path, err) + } + + l, err := net.Listen("unix", s.path) + if err != nil { + return fmt.Errorf("listen error (%v): %v", s.path, err) + } + defer l.Close() + + srv := http.Server{Handler: s.mux} + go func() { + <-stopCh + srv.Close() + }() + return srv.Serve(l) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index dfa21157906d..bc6500909c3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -621,9 +621,11 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o func shouldDelegateList(opts storage.ListOptions) bool { resourceVersion := opts.ResourceVersion pred := opts.Predicate + match := opts.ResourceVersionMatch pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) hasContinuation := pagingEnabled && len(pred.Continue) > 0 hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan // If resourceVersion is not specified, serve it from underlying // storage (for backward compatibility). 
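The DebugSocket type added above serves pprof over an unprotected unix-domain socket and shuts down when its stop channel closes. Below is a self-contained sketch of the same stdlib pattern; serveDebugSocket and the /tmp/debug.sock path are illustrative names, not the vendored API.

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	"time"
)

// serveDebugSocket exposes pprof handlers on a unix-domain socket at path and
// shuts the server down when stopCh is closed.
func serveDebugSocket(path string, stopCh <-chan struct{}) error {
	// Remove any stale socket file left behind by a previous run.
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove %q: %v", path, err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)

	l, err := net.Listen("unix", path)
	if err != nil {
		return fmt.Errorf("listen error on %q: %v", path, err)
	}
	defer l.Close()

	srv := http.Server{Handler: mux}
	go func() {
		<-stopCh
		srv.Close()
	}()
	return srv.Serve(l)
}

func main() {
	stopCh := make(chan struct{})
	go func() {
		// In a real server this would be wired to SIGTERM; the sketch just
		// stops after a short grace period so the program exits.
		time.Sleep(200 * time.Millisecond)
		close(stopCh)
	}()
	if err := serveDebugSocket("/tmp/debug.sock", stopCh); err != nil && err != http.ErrServerClosed {
		fmt.Println("debug socket:", err)
	}
}

While it is up, a client can reach it with, for example, curl --unix-socket /tmp/debug.sock http://localhost/debug/pprof/ to list the available profiles.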
If a continuation is @@ -631,7 +633,7 @@ func shouldDelegateList(opts storage.ListOptions) bool { // Limits are only sent to storage when resourceVersion is non-zero // since the watch cache isn't able to perform continuations, and // limits are ignored when resource version is zero - return resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact + return resourceVersion == "" || hasContinuation || hasLimit || unsupportedMatch } func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred storage.SelectionPredicate, recursive bool) ([]interface{}, uint64, string, error) { @@ -657,6 +659,11 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio return c.storage.GetList(ctx, key, opts, listObj) } + match := opts.ResourceVersionMatch + if match != metav1.ResourceVersionMatchNotOlderThan && match != "" { + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } + // If resourceVersion is specified, serve it from cache. // It's guaranteed that the returned value is at least that // fresh as the given resourceVersion. @@ -715,6 +722,10 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) } } + if listVal.IsNil() { + // Ensure that we never return a nil Items pointer in the result for consistency. + listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } span.AddEvent("Filtered items", attribute.Int("count", listVal.Len())) if c.versioner != nil { if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index bbbeee361f8b..3e67f5132859 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -463,6 +463,20 @@ func (w *watchCache) waitUntilFreshAndBlock(ctx context.Context, resourceVersion return nil } +type sortableStoreElements []interface{} + +func (s sortableStoreElements) Len() int { + return len(s) +} + +func (s sortableStoreElements) Less(i, j int) bool { + return s[i].(*storeElement).Key < s[j].(*storeElement).Key +} + +func (s sortableStoreElements) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { @@ -472,16 +486,21 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion return nil, 0, "", err } - // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only - // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we - // want - they will be filtered out later. The fact that we return less things is only further performance improvement. - // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. 
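The cacher change above (mirrored later in shouldListFromStorage) stops special-casing ResourceVersionMatchExact and instead delegates any list whose ResourceVersionMatch the watch cache cannot honour, i.e. anything other than empty or NotOlderThan. Here is a compact sketch of that predicate with pared-down stand-in types; listOptions and the matchNotOlderThan constant are assumptions, not the real storage or metav1 types.

package main

import "fmt"

// listOptions is a pared-down stand-in for storage.ListOptions / metav1.ListOptions.
type listOptions struct {
	ResourceVersion      string
	ResourceVersionMatch string
	Continue             string
	Limit                int64
}

const matchNotOlderThan = "NotOlderThan" // value of metav1.ResourceVersionMatchNotOlderThan

// shouldDelegateList mirrors the updated predicate: serve from etcd when the
// resource version is unset, when chunking is in play, or when the client asks
// for match semantics the watch cache cannot honour.
func shouldDelegateList(opts listOptions, pagingEnabled bool) bool {
	hasContinuation := pagingEnabled && len(opts.Continue) > 0
	hasLimit := pagingEnabled && opts.Limit > 0 && opts.ResourceVersion != "0"
	unsupportedMatch := opts.ResourceVersionMatch != "" && opts.ResourceVersionMatch != matchNotOlderThan
	return opts.ResourceVersion == "" || hasContinuation || hasLimit || unsupportedMatch
}

func main() {
	cases := []listOptions{
		{ResourceVersion: "0"},                                            // served from cache
		{ResourceVersion: "100", ResourceVersionMatch: matchNotOlderThan}, // served from cache
		{ResourceVersion: "100", ResourceVersionMatch: "Exact"},           // delegated to etcd
		{Limit: 500},                                                      // delegated to etcd
	}
	for _, c := range cases {
		fmt.Printf("%+v -> delegate=%v\n", c, shouldDelegateList(c, true))
	}
}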
- for _, matchValue := range matchValues { - if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { - return result, w.resourceVersion, matchValue.IndexName, nil + result, rv, index, err := func() ([]interface{}, uint64, string, error) { + // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only + // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we + // want - they will be filtered out later. The fact that we return less things is only further performance improvement. + // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. + for _, matchValue := range matchValues { + if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { + return result, w.resourceVersion, matchValue.IndexName, nil + } } - } - return w.store.List(), w.resourceVersion, "", nil + return w.store.List(), w.resourceVersion, "", nil + }() + + sort.Sort(sortableStoreElements(result)) + return result, rv, index, err } // WaitUntilFreshAndGet returns a pointers to object. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go index 96d592e7907b..f60210f96c7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go @@ -47,8 +47,7 @@ func NewETCDLatencyTracker(delegate clientv3.KV) clientv3.KV { // tracking function TrackStorageLatency is thread safe. // // NOTE: Compact is an asynchronous process and is not associated with -// -// any request, so we will not be tracking its latency. +// any request, so we will not be tracking its latency. type clientV3KVLatencyTracker struct { clientv3.KV } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 51e5b9012b95..463a84d7b912 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -782,6 +782,10 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption options = append(options, clientv3.WithRev(withRev)) } } + if v.IsNil() { + // Ensure that we never return a nil Items pointer in the result for consistency. 
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } // instruct the client to begin querying from immediately after the last key we returned // we never return a key that the client wouldn't be allowed to see diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go index 726e3053e3f2..01a62729e919 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go @@ -35,6 +35,7 @@ import ( kmstypes "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1" "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" "k8s.io/klog/v2" + kmsservice "k8s.io/kms/service" "k8s.io/utils/lru" ) @@ -49,18 +50,12 @@ const ( encryptedDEKMaxSize = 1 * 1024 // 1 kB ) -// Service allows encrypting and decrypting data using an external Key Management Service. -type Service interface { - // Decrypt a given bytearray to obtain the original data as bytes. - Decrypt(ctx context.Context, uid string, req *DecryptRequest) ([]byte, error) - // Encrypt bytes to a ciphertext. - Encrypt(ctx context.Context, uid string, data []byte) (*EncryptResponse, error) - // Status returns the status of the KMS. - Status(ctx context.Context) (*StatusResponse, error) -} +type KeyIDGetterFunc func(context.Context) (keyID string, err error) type envelopeTransformer struct { - envelopeService Service + envelopeService kmsservice.Service + + keyIDGetter KeyIDGetterFunc // transformers is a thread-safe LRU cache which caches decrypted DEKs indexed by their encrypted form. transformers *lru.Cache @@ -72,32 +67,11 @@ type envelopeTransformer struct { cacheEnabled bool } -// EncryptResponse is the response from the Envelope service when encrypting data. -type EncryptResponse struct { - Ciphertext []byte - KeyID string - Annotations map[string][]byte -} - -// DecryptRequest is the request to the Envelope service when decrypting data. -type DecryptRequest struct { - Ciphertext []byte - KeyID string - Annotations map[string][]byte -} - -// StatusResponse is the response from the Envelope service when getting the status of the service. -type StatusResponse struct { - Version string - Healthz string - KeyID string -} - // NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. // It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to // the data items they encrypt. A cache (of size cacheSize) is maintained to store the most recently // used decrypted DEKs in memory. 
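Both GetList paths above now replace a nil Items slice with an empty one before returning, so clients consistently see items: [] rather than items: null. A small sketch of the reflect pattern follows, against a hypothetical widgetList type rather than a real API list.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type widgetList struct {
	Items []string `json:"items"`
}

// ensureNonNilItems mirrors the fix: if the Items field is a nil slice,
// replace it with an empty slice of the same type.
func ensureNonNilItems(listObj interface{}) {
	v := reflect.ValueOf(listObj).Elem().FieldByName("Items")
	if v.Kind() == reflect.Slice && v.IsNil() {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}
}

func main() {
	before, _ := json.Marshal(&widgetList{})
	list := &widgetList{}
	ensureNonNilItems(list)
	after, _ := json.Marshal(list)
	fmt.Println(string(before)) // {"items":null}
	fmt.Println(string(after))  // {"items":[]}
}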
-func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransformerFunc func(cipher.Block) value.Transformer) value.Transformer { +func NewEnvelopeTransformer(envelopeService kmsservice.Service, keyIDGetter KeyIDGetterFunc, cacheSize int, baseTransformerFunc func(cipher.Block) value.Transformer) value.Transformer { var cache *lru.Cache if cacheSize > 0 { @@ -108,6 +82,7 @@ func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransfor return &envelopeTransformer{ envelopeService: envelopeService, + keyIDGetter: keyIDGetter, transformers: cache, baseTransformerFunc: baseTransformerFunc, cacheEnabled: cacheSize > 0, @@ -133,7 +108,7 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b } uid := string(uuid.NewUUID()) klog.V(6).InfoS("Decrypting content using envelope service", "uid", uid, "key", string(dataCtx.AuthenticatedData())) - key, err := t.envelopeService.Decrypt(ctx, uid, &DecryptRequest{ + key, err := t.envelopeService.Decrypt(ctx, uid, &kmsservice.DecryptRequest{ Ciphertext: encryptedObject.EncryptedDEK, KeyID: encryptedObject.KeyID, Annotations: encryptedObject.Annotations, @@ -148,7 +123,21 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b } } - return transformer.TransformFromStorage(ctx, encryptedObject.EncryptedData, dataCtx) + out, stale, err := transformer.TransformFromStorage(ctx, encryptedObject.EncryptedData, dataCtx) + if err != nil { + return nil, false, err + } + if stale { + return out, stale, nil + } + + // Check keyID freshness in addition to data staleness + keyID, err := t.keyIDGetter(ctx) + if err != nil { + return nil, false, err + } + return out, encryptedObject.KeyID != keyID, nil + } // TransformToStorage encrypts data to be written to disk using envelope encryption. @@ -160,7 +149,7 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt } uid := string(uuid.NewUUID()) - klog.V(6).InfoS("Encrypting content using envelope service", "uid", uid, "key", string(dataCtx.AuthenticatedData())) + klog.V(6).InfoS("encrypting content using envelope service", "uid", uid, "key", string(dataCtx.AuthenticatedData())) resp, err := t.envelopeService.Encrypt(ctx, uid, newKey) if err != nil { return nil, fmt.Errorf("failed to encrypt DEK, error: %w", err) @@ -183,6 +172,12 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt Annotations: resp.Annotations, } + // Check keyID freshness and write to log if key IDs are different + statusKeyID, err := t.keyIDGetter(ctx) + if err == nil && encObject.KeyID != statusKeyID { + klog.V(2).InfoS("observed different key IDs when encrypting content using kms v2 envelope service", "uid", uid, "objectKeyID", encObject.KeyID, "statusKeyID", statusKeyID) + } + // Serialize the EncryptedObject to a byte array. return t.doEncode(encObject) } @@ -261,7 +256,7 @@ func validateEncryptedObject(o *kmstypes.EncryptedObject) error { if err := validateEncryptedDEK(o.EncryptedDEK); err != nil { return fmt.Errorf("failed to validate encrypted DEK: %w", err) } - if err := validateKeyID(o.KeyID); err != nil { + if err := ValidateKeyID(o.KeyID); err != nil { return fmt.Errorf("failed to validate key id: %w", err) } if err := validateAnnotations(o.Annotations); err != nil { @@ -301,10 +296,10 @@ func validateAnnotations(annotations map[string][]byte) error { return utilerrors.NewAggregate(errs) } -// validateKeyID tests the following: +// ValidateKeyID tests the following: // 1. The keyID is not empty. 
// 2. The size of keyID is less than 1 kB. -func validateKeyID(keyID string) error { +func ValidateKeyID(keyID string) error { if len(keyID) == 0 { return fmt.Errorf("keyID is empty") } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go index 692aeef53a9b..cd22fa8d9321 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/grpc_service.go @@ -30,6 +30,7 @@ import ( "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/util" "k8s.io/klog/v2" kmsapi "k8s.io/kms/apis/v2alpha1" + kmsservice "k8s.io/kms/service" ) const ( @@ -45,7 +46,7 @@ type gRPCService struct { } // NewGRPCService returns an envelope.Service which use gRPC to communicate the remote KMS provider. -func NewGRPCService(ctx context.Context, endpoint string, callTimeout time.Duration) (Service, error) { +func NewGRPCService(ctx context.Context, endpoint string, callTimeout time.Duration) (kmsservice.Service, error) { klog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) addr, err := util.ParseEndpoint(endpoint) @@ -88,7 +89,7 @@ func NewGRPCService(ctx context.Context, endpoint string, callTimeout time.Durat } // Decrypt a given data string to obtain the original byte data. -func (g *gRPCService) Decrypt(ctx context.Context, uid string, req *DecryptRequest) ([]byte, error) { +func (g *gRPCService) Decrypt(ctx context.Context, uid string, req *kmsservice.DecryptRequest) ([]byte, error) { ctx, cancel := context.WithTimeout(ctx, g.callTimeout) defer cancel() @@ -106,7 +107,7 @@ func (g *gRPCService) Decrypt(ctx context.Context, uid string, req *DecryptReque } // Encrypt bytes to a string ciphertext. -func (g *gRPCService) Encrypt(ctx context.Context, uid string, plaintext []byte) (*EncryptResponse, error) { +func (g *gRPCService) Encrypt(ctx context.Context, uid string, plaintext []byte) (*kmsservice.EncryptResponse, error) { ctx, cancel := context.WithTimeout(ctx, g.callTimeout) defer cancel() @@ -118,7 +119,7 @@ func (g *gRPCService) Encrypt(ctx context.Context, uid string, plaintext []byte) if err != nil { return nil, err } - return &EncryptResponse{ + return &kmsservice.EncryptResponse{ Ciphertext: response.Ciphertext, KeyID: response.KeyId, Annotations: response.Annotations, @@ -126,7 +127,7 @@ func (g *gRPCService) Encrypt(ctx context.Context, uid string, plaintext []byte) } // Status returns the status of the KMSv2 provider. 
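The envelope transformer changes above thread a KeyIDGetterFunc through reads so that an object decrypted under an old KMS key ID is reported as stale and gets rewritten under the current key. Below is a stripped-down sketch of that staleness decision; keyIDGetterFunc and isStale are illustrative stand-ins, not the vendored functions.

package main

import (
	"context"
	"fmt"
)

// keyIDGetterFunc mirrors the KeyIDGetterFunc shape: it reports the key ID the
// KMS plugin is currently using (in the real code this comes from its status).
type keyIDGetterFunc func(ctx context.Context) (string, error)

// isStale reports whether the object should be rewritten: either the payload
// itself was read as stale, or it was encrypted under an older key ID.
func isStale(ctx context.Context, payloadStale bool, storedKeyID string, currentKeyID keyIDGetterFunc) (bool, error) {
	if payloadStale {
		return true, nil
	}
	keyID, err := currentKeyID(ctx)
	if err != nil {
		return false, err
	}
	return storedKeyID != keyID, nil
}

func main() {
	current := keyIDGetterFunc(func(context.Context) (string, error) { return "key-2", nil })

	same, _ := isStale(context.Background(), false, "key-2", current)
	rotated, _ := isStale(context.Background(), false, "key-1", current)
	fmt.Println("same key stale:", same)       // false
	fmt.Println("rotated key stale:", rotated) // true
}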
-func (g *gRPCService) Status(ctx context.Context) (*StatusResponse, error) { +func (g *gRPCService) Status(ctx context.Context) (*kmsservice.StatusResponse, error) { ctx, cancel := context.WithTimeout(ctx, g.callTimeout) defer cancel() @@ -135,5 +136,5 @@ func (g *gRPCService) Status(ctx context.Context) (*StatusResponse, error) { if err != nil { return nil, err } - return &StatusResponse{Version: response.Version, Healthz: response.Healthz, KeyID: response.KeyId}, nil + return &kmsservice.StatusResponse{Version: response.Version, Healthz: response.Healthz, KeyID: response.KeyId}, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.pb.go index 307b0b248c95..bab4e14fa6a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.pb.go @@ -111,18 +111,21 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x28, 0x33, 0x4a, 0xcc, 0x29, 0xc8, 0x48, 0x34, 0x54, - 0xfa, 0xcf, 0xc8, 0xc5, 0xef, 0x9a, 0x97, 0x5c, 0x54, 0x59, 0x50, 0x92, 0x9a, 0xe2, 0x9f, 0x94, - 0x95, 0x9a, 0x5c, 0x22, 0xa4, 0xc2, 0xc5, 0x9b, 0x0a, 0x13, 0x72, 0x49, 0x2c, 0x49, 0x94, 0x60, - 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x42, 0x15, 0x14, 0x12, 0xe1, 0x62, 0xcd, 0x4e, 0xad, 0xf4, 0x74, - 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x70, 0x84, 0x94, 0xb8, 0x78, 0x10, 0xca, 0x5c, - 0xbd, 0x25, 0x98, 0xc1, 0x5a, 0x51, 0xc4, 0x84, 0x7c, 0xb8, 0xb8, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, - 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x25, 0x58, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0xb4, 0xf4, 0x60, - 0x6e, 0xd2, 0x43, 0x73, 0x8f, 0x9e, 0x23, 0x42, 0xb1, 0x6b, 0x5e, 0x49, 0x51, 0x65, 0x10, 0xb2, - 0x76, 0x29, 0x3b, 0x2e, 0x01, 0x74, 0x05, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x60, 0x77, - 0x73, 0x06, 0x81, 0x98, 0x20, 0xd7, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x82, 0x5d, 0xcb, 0x13, 0x04, - 0xe1, 0x58, 0x31, 0x59, 0x30, 0x26, 0xb1, 0x81, 0x83, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, - 0x88, 0x8c, 0xbb, 0x4e, 0x1f, 0x01, 0x00, 0x00, + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4b, 0xf3, 0x30, + 0x18, 0xc6, 0xe9, 0xfa, 0x7d, 0x62, 0xb3, 0x8a, 0x23, 0x78, 0x28, 0x9e, 0xca, 0xf0, 0x50, 0x3c, + 0x24, 0x58, 0x2f, 0xc3, 0x83, 0xe0, 0x68, 0x0f, 0xa2, 0x20, 0xe4, 0xe8, 0xed, 0xdd, 0x7c, 0x99, + 0x35, 0x35, 0x09, 0x49, 0x0c, 0xf4, 0xaf, 0x57, 0x96, 0x59, 0xe6, 0x76, 0xcb, 0xf3, 0xf0, 0x7b, + 0xe0, 0x97, 0x97, 0x64, 0x60, 0x3a, 0x66, 0xac, 0xf6, 0x9a, 0x9e, 0x86, 0x1a, 0x7a, 0xf3, 0x0e, + 0x37, 0xf3, 0xef, 0x84, 0x9c, 0xb7, 0x6a, 0x6d, 0x07, 0xe3, 0xf1, 0xed, 0x65, 0xf5, 0x81, 0x6b, + 0x4f, 0xaf, 0xc8, 0x19, 0x8e, 0x55, 0x03, 0x1e, 0x8a, 0xa4, 0x4c, 0xaa, 0x5c, 0x1c, 0x96, 0xf4, + 0x82, 0xfc, 0x97, 0x38, 0x3c, 0x36, 0xc5, 0xa4, 0x4c, 0xaa, 0x4c, 0xec, 0x02, 0x9d, 0x93, 0x7c, + 0x8f, 0xb5, 0x4f, 0x45, 0x1a, 0xa7, 0x07, 0x1d, 0x7d, 0x26, 0x53, 0x50, 0x4a, 0x7b, 0xf0, 0x9d, + 0x56, 0xae, 0xf8, 0x57, 0xa6, 0xd5, 0xb4, 0xbe, 0x66, 0xa3, 0x13, 0x3b, 0xf2, 0x61, 0x0f, 0x7b, + 0xb8, 0x55, 
0xde, 0x0e, 0xe2, 0xef, 0xfc, 0xf2, 0x9e, 0xcc, 0x8e, 0x01, 0x3a, 0x23, 0xa9, 0xc4, + 0x21, 0x7a, 0x67, 0x62, 0xfb, 0xdc, 0xda, 0x06, 0xe8, 0xbf, 0x30, 0xda, 0xe6, 0x62, 0x17, 0xee, + 0x26, 0x8b, 0x64, 0xd9, 0xbc, 0x2e, 0xe5, 0xc2, 0xb1, 0x4e, 0x73, 0x30, 0x9d, 0x43, 0x1b, 0xd0, + 0x72, 0x23, 0x37, 0xdc, 0x79, 0x6d, 0x61, 0x83, 0x3c, 0x92, 0xfc, 0xf7, 0x03, 0x1c, 0x55, 0xc0, + 0x5e, 0x1b, 0xe4, 0xf2, 0xd3, 0x85, 0x9a, 0x8f, 0xce, 0xab, 0x93, 0x78, 0xd8, 0xdb, 0x9f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x37, 0xf9, 0x4d, 0xc2, 0x65, 0x01, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.proto index e3b978b05edc..78995f73a26e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1/api.proto @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// To regenerate api.pb.go run hack/update-generated-kms.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v2alpha1; +option go_package = "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2alpha1"; // EncryptedObject is the representation of data stored in etcd after envelope encryption. message EncryptedObject { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc.go index 43603907114e..904f4fce2f60 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc.go @@ -108,11 +108,12 @@ const epsilon = 0.0000001 // if possible otherwise returns an error saying why it is impossible. // `allocs` sums to `requiredSum`. // For each J in [0, len(classes)): -// (1) `classes[J].lowerBound <= allocs[J] <= classes[J].upperBound` and -// (2) exactly one of the following is true: -// (2a) `allocs[J] == fairProp * classes[J].target`, -// (2b) `allocs[J] == classes[J].lowerBound && classes[J].lowerBound > fairProp * classes[J].target`, or -// (2c) `allocs[J] == classes[J].upperBound && classes[J].upperBound < fairProp * classes[J].target`. +// 1. `classes[J].lowerBound <= allocs[J] <= classes[J].upperBound` and +// 2. exactly one of the following is true: +// 2a. `allocs[J] == fairProp * classes[J].target`, +// 2b. `allocs[J] == classes[J].lowerBound && classes[J].lowerBound > fairProp * classes[J].target`, or +// 2c. `allocs[J] == classes[J].upperBound && classes[J].upperBound < fairProp * classes[J].target`. +// // Each allocProblemItem is required to have `target >= lowerBound >= 0` and `upperBound >= lowerBound`. // A target smaller than MinTarget is treated as if it were MinTarget. 
func computeConcurrencyAllocation(requiredSum int, classes []allocProblemItem) ([]float64, float64, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go index 75d70a0ad46c..130746a411ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -147,8 +147,11 @@ func key(requestInfo *apirequest.RequestInfo) string { // staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { resourceVersion := opts.ResourceVersion + match := opts.ResourceVersionMatch pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) hasContinuation := pagingEnabled && len(opts.Continue) > 0 hasLimit := pagingEnabled && opts.Limit > 0 && resourceVersion != "0" - return resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact + unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan + + return resourceVersion == "" || hasContinuation || hasLimit || unsupportedMatch } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go index 287b100cfd8b..b7b9c886bfc6 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go @@ -195,9 +195,8 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu // GetInterestedWatchCount implements WatchTracker interface. // // TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't -// -// contain the Name field set. Figure out if we can somehow get it for the -// more accurate cost estimation. +// contain the Name field set. Figure out if we can somehow get it for the +// more accurate cost estimation. // // TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls. func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go index 693821ac0258..b1db3c42b050 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go @@ -68,7 +68,7 @@ func pruneEnums(schema *spec.Schema) *spec.Schema { // note that the new lines before the header should be removed too, // thus the slice range. 
clone() - schema.Description = schema.Description[:headerIndex] + schema.Description = strings.TrimSpace(schema.Description[:headerIndex]) } if len(schema.Enum) != 0 { // remove the enum field diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index 06a74c1cd311..45143bf6efb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -121,7 +121,7 @@ func WithExponentialBackoff(ctx context.Context, retryBackoff wait.Backoff, webh // having a webhook error allows us to track the last actual webhook error for requests that // are later cancelled or time out. var webhookErr error - err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func() (bool, error) { + err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func(_ context.Context) (bool, error) { webhookErr = webhookFn() if shouldRetry(webhookErr) { return false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go deleted file mode 100644 index 4936110fbdc9..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// AdmissionPolicySpecApplyConfiguration represents an declarative configuration of the AdmissionPolicySpec type for use -// with apply. -type AdmissionPolicySpecApplyConfiguration struct { - ParamSource *ParamSourceApplyConfiguration `json:"paramSource,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` - Validations []ValidationApplyConfiguration `json:"validations,omitempty"` - FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` -} - -// AdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the AdmissionPolicySpec type for use with -// apply. -func AdmissionPolicySpec() *AdmissionPolicySpecApplyConfiguration { - return &AdmissionPolicySpecApplyConfiguration{} -} - -// WithParamSource sets the ParamSource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParamSource field is set to the value of the last call. 
-func (b *AdmissionPolicySpecApplyConfiguration) WithParamSource(value *ParamSourceApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.ParamSource = value - return b -} - -// WithMatchResources sets the MatchResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MatchResources field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.MatchResources = value - return b -} - -// WithValidations adds the given value to the Validations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Validations field. -func (b *AdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithValidations") - } - b.Validations = append(b.Validations, *values[i]) - } - return b -} - -// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FailurePolicy field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *AdmissionPolicySpecApplyConfiguration { - b.FailurePolicy = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go deleted file mode 100644 index a7a5a6af833c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// ParamSourceApplyConfiguration represents an declarative configuration of the ParamSource type for use -// with apply. -type ParamSourceApplyConfiguration struct { - APIVersion *string `json:"apiVersion,omitempty"` - Kind *string `json:"kind,omitempty"` -} - -// ParamSourceApplyConfiguration constructs an declarative configuration of the ParamSource type for use with -// apply. 
-func ParamSource() *ParamSourceApplyConfiguration { - return &ParamSourceApplyConfiguration{} -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithAPIVersion(value string) *ParamSourceApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithKind(value string) *ParamSourceApplyConfiguration { - b.Kind = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go deleted file mode 100644 index 313de9d5f551..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1alpha1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. 
-func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1alpha1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go deleted file mode 100644 index 112f4826b680..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. 
-func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b99809f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. 
-type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go deleted file mode 100644 index 0fd5dd34db0f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. 
-func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go deleted file mode 100644 index 86601cc48a89..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v2 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use -// with apply. -type PodResourceMetricSourceApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` - Target *MetricTargetApplyConfiguration `json:"target,omitempty"` -} - -// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with -// apply. -func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { - return &PodResourceMetricSourceApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { - b.Name = &value - return b -} - -// WithTarget sets the Target field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Target field is set to the value of the last call. 
-func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { - b.Target = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index db376b941bb8..493af6fb3c1e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -35,7 +35,7 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` @@ -43,7 +43,7 @@ type ServiceSpecApplyConfiguration struct { IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` } // ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with @@ -152,7 +152,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -218,7 +218,7 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. 
-func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go deleted file mode 100644 index 27b49bf15384..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use -// with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with -// apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724cfee9..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. 
-type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d4a5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. 
-func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6af00d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c796881393f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f76581ad..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. 
-func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index c70906cfafd7..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use -// with apply. -type PodSecurityPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` -} - -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with -// apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} - b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") - return b -} - -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. 
-// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") -} - -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") -} - -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(podSecurityPolicy.Name) - - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { - b.Spec = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc92d5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. -type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. 
-func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85fd1b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675ac92..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. 
-func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61756..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. 
-func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a73a5f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. 
-func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec4313812459..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. 
-func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index afad3b12e182..e0241365068c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -6553,6 +6553,8 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.ResourceClaim elementRelationship: associative + keys: + - name - name: limits type: map: @@ -7732,29 +7734,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -7990,18 +7969,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: @@ -8024,28 +7991,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.extensions.v1beta1.IPBlock map: fields: @@ -8291,135 +8236,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - 
name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: @@ -8529,66 +8345,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod map: fields: diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go deleted file mode 100644 index f400e516493d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// GroupVersionKindApplyConfiguration represents an declarative configuration of the GroupVersionKind type for use -// with apply. -type GroupVersionKindApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Kind *string `json:"kind,omitempty"` -} - -// GroupVersionKindApplyConfiguration constructs an declarative configuration of the GroupVersionKind type for use with -// apply. -func GroupVersionKind() *GroupVersionKindApplyConfiguration { - return &GroupVersionKindApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithGroup(value string) *GroupVersionKindApplyConfiguration { - b.Group = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithVersion(value string) *GroupVersionKindApplyConfiguration { - b.Version = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithKind(value string) *GroupVersionKindApplyConfiguration { - b.Kind = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go deleted file mode 100644 index 5cadee3353f7..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use -// with apply. -type ListMetaApplyConfiguration struct { - SelfLink *string `json:"selfLink,omitempty"` - ResourceVersion *string `json:"resourceVersion,omitempty"` - Continue *string `json:"continue,omitempty"` - RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` -} - -// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with -// apply. -func ListMeta() *ListMetaApplyConfiguration { - return &ListMetaApplyConfiguration{} -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration { - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration { - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration { - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration { - b.RemainingItemCount = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go deleted file mode 100644 index 7db432089ef5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusApplyConfiguration represents an declarative configuration of the Status type for use -// with apply. -type StatusApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - *ListMetaApplyConfiguration `json:"metadata,omitempty"` - Status *string `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Reason *metav1.StatusReason `json:"reason,omitempty"` - Details *StatusDetailsApplyConfiguration `json:"details,omitempty"` - Code *int32 `json:"code,omitempty"` -} - -// StatusApplyConfiguration constructs an declarative configuration of the Status type for use with -// apply. -func Status() *StatusApplyConfiguration { - b := &StatusApplyConfiguration{} - b.WithKind("Status") - b.WithAPIVersion("meta.k8s.io/v1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithKind(value string) *StatusApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithAPIVersion(value string) *StatusApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithSelfLink(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithResourceVersion(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithContinue(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithRemainingItemCount(value int64) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.RemainingItemCount = &value - return b -} - -func (b *StatusApplyConfiguration) ensureListMetaApplyConfigurationExists() { - if b.ListMetaApplyConfiguration == nil { - b.ListMetaApplyConfiguration = &ListMetaApplyConfiguration{} - } -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithStatus(value string) *StatusApplyConfiguration { - b.Status = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithMessage(value string) *StatusApplyConfiguration { - b.Message = &value - return b -} - -// WithReason sets the Reason field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Reason field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithReason(value metav1.StatusReason) *StatusApplyConfiguration { - b.Reason = &value - return b -} - -// WithDetails sets the Details field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Details field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithDetails(value *StatusDetailsApplyConfiguration) *StatusApplyConfiguration { - b.Details = value - return b -} - -// WithCode sets the Code field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Code field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithCode(value int32) *StatusApplyConfiguration { - b.Code = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go deleted file mode 100644 index 7f05bca498dc..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusCauseApplyConfiguration represents an declarative configuration of the StatusCause type for use -// with apply. -type StatusCauseApplyConfiguration struct { - Type *v1.CauseType `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Field *string `json:"field,omitempty"` -} - -// StatusCauseApplyConfiguration constructs an declarative configuration of the StatusCause type for use with -// apply. -func StatusCause() *StatusCauseApplyConfiguration { - return &StatusCauseApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithType(value v1.CauseType) *StatusCauseApplyConfiguration { - b.Type = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithMessage(value string) *StatusCauseApplyConfiguration { - b.Message = &value - return b -} - -// WithField sets the Field field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Field field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithField(value string) *StatusCauseApplyConfiguration { - b.Field = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go deleted file mode 100644 index a7dbaa1b26e0..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - types "k8s.io/apimachinery/pkg/types" -) - -// StatusDetailsApplyConfiguration represents an declarative configuration of the StatusDetails type for use -// with apply. 
-type StatusDetailsApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Group *string `json:"group,omitempty"` - Kind *string `json:"kind,omitempty"` - UID *types.UID `json:"uid,omitempty"` - Causes []StatusCauseApplyConfiguration `json:"causes,omitempty"` - RetryAfterSeconds *int32 `json:"retryAfterSeconds,omitempty"` -} - -// StatusDetailsApplyConfiguration constructs an declarative configuration of the StatusDetails type for use with -// apply. -func StatusDetails() *StatusDetailsApplyConfiguration { - return &StatusDetailsApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithName(value string) *StatusDetailsApplyConfiguration { - b.Name = &value - return b -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithGroup(value string) *StatusDetailsApplyConfiguration { - b.Group = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithKind(value string) *StatusDetailsApplyConfiguration { - b.Kind = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithUID(value types.UID) *StatusDetailsApplyConfiguration { - b.UID = &value - return b -} - -// WithCauses adds the given value to the Causes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Causes field. -func (b *StatusDetailsApplyConfiguration) WithCauses(values ...*StatusCauseApplyConfiguration) *StatusDetailsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithCauses") - } - b.Causes = append(b.Causes, *values[i]) - } - return b -} - -// WithRetryAfterSeconds sets the RetryAfterSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RetryAfterSeconds field is set to the value of the last call. 
-func (b *StatusDetailsApplyConfiguration) WithRetryAfterSeconds(value int32) *StatusDetailsApplyConfiguration { - b.RetryAfterSeconds = &value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go index 9025e888ec1f..43906190fb7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -196,7 +196,7 @@ func (d *DiscoveryClient) GroupsAndMaybeResources() (*metav1.APIGroupList, map[s } // Discovery groups and (possibly) resources downloaded from /apis. apiGroups, apiResources, aerr := d.downloadAPIs() - if err != nil { + if aerr != nil { return nil, nil, aerr } // Merge apis groups into the legacy groups. diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go index c78c256ef7f7..d234db893ddd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -141,7 +141,10 @@ func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" action.Resource = schema.GroupVersionResource{Resource: "version"} - c.Invokes(action, nil) + _, err := c.Invokes(action, nil) + if err != nil { + return nil, err + } if c.FakedServerVersion != nil { return c.FakedServerVersion, nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go index 40878b400f6a..9785613d2c59 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go @@ -120,7 +120,7 @@ func (f *dynamicSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) func NewFilteredDynamicInformer(client dynamic.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { return &dynamicInformer{ gvr: gvr, - informer: cache.NewSharedIndexInformer( + informer: cache.NewSharedIndexInformerWithOptions( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { @@ -136,8 +136,11 @@ func NewFilteredDynamicInformer(client dynamic.Interface, gvr schema.GroupVersio }, }, &unstructured.Unstructured{}, - resyncPeriod, - indexers, + cache.SharedIndexInformerOptions{ + ResyncPeriod: resyncPeriod, + Indexers: indexers, + ObjectDescription: gvr.String(), + }, ), } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/doc.go new file mode 100644 index 000000000000..231bffb69bda --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package informers provides generated informers for Kubernetes APIs. +package informers diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index 6f0bea7e8706..600741e3a273 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -32,8 +32,6 @@ type Interface interface { Ingresses() IngressInformer // NetworkPolicies returns a NetworkPolicyInformer. NetworkPolicies() NetworkPolicyInformer - // PodSecurityPolicies returns a PodSecurityPolicyInformer. - PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. ReplicaSets() ReplicaSetInformer } @@ -69,11 +67,6 @@ func (v *version) NetworkPolicies() NetworkPolicyInformer { return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// PodSecurityPolicies returns a PodSecurityPolicyInformer. -func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { - return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // ReplicaSets returns a ReplicaSetInformer. func (v *version) ReplicaSets() ReplicaSetInformer { return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 11be2751ccf4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - time "time" - - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" - cache "k8s.io/client-go/tools/cache" -) - -// PodSecurityPolicyInformer provides access to a shared informer and lister for -// PodSecurityPolicies. 
-type PodSecurityPolicyInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.PodSecurityPolicyLister -} - -type podSecurityPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExtensionsV1beta1().PodSecurityPolicies().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) - }, - }, - &extensionsv1beta1.PodSecurityPolicy{}, - resyncPeriod, - indexers, - ) -} - -func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.PodSecurityPolicy{}, f.defaultInformer) -} - -func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { - return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go index 59505bddaa04..ba4d4f429a0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go @@ -247,8 +247,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return 
&genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/doc.go index b272334ad0ec..9cef4242f2f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. package kubernetes diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index bbd4cf007ff0..b88598b7153b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeMutatingWebhookConfigurations struct { Fake *FakeAdmissionregistrationV1 } -var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "mutatingwebhookconfigurations"} +var mutatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") -var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfiguration"} +var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err + return obj.(*v1.MutatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *admissionregistrationv1.MutatingWebhookConfigurationList, err error) { +func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &admissionregistrationv1.MutatingWebhookConfigurationList{}) + Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1.MutatingWebhookConfigurationList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.Li if label == nil { label = labels.Everything() } - list := &admissionregistrationv1.MutatingWebhookConfigurationList{ListMeta: obj.(*admissionregistrationv1.MutatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*admissionregistrationv1.MutatingWebhookConfigurationList).Items { + list := &v1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1.MutatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*v1.MutatingWebhookConfigurationList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.Li } // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err + return obj.(*v1.MutatingWebhookConfiguration), err } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err + return obj.(*v1.MutatingWebhookConfiguration), err } // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1.MutatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) - _, err := c.Fake.Invokes(action, &admissionregistrationv1.MutatingWebhookConfigurationList{}) + _, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{}) return err } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err + return obj.(*v1.MutatingWebhookConfiguration), err } // Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { if mutatingWebhookConfiguration == nil { return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err + return obj.(*v1.MutatingWebhookConfiguration), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index ee769dd3cba1..a6951c736e06 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeValidatingWebhookConfigurations struct { Fake *FakeAdmissionregistrationV1 } -var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "validatingwebhookconfigurations"} +var 
validatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") -var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfiguration"} +var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err + return obj.(*v1.ValidatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *admissionregistrationv1.ValidatingWebhookConfigurationList, err error) { +func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &admissionregistrationv1.ValidatingWebhookConfigurationList{}) + Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1.ValidatingWebhookConfigurationList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1. if label == nil { label = labels.Everything() } - list := &admissionregistrationv1.ValidatingWebhookConfigurationList{ListMeta: obj.(*admissionregistrationv1.ValidatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*admissionregistrationv1.ValidatingWebhookConfigurationList).Items { + list := &v1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1.ValidatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*v1.ValidatingWebhookConfigurationList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1. } // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. 
Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err + return obj.(*v1.ValidatingWebhookConfiguration), err } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err + return obj.(*v1.ValidatingWebhookConfiguration), err } // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1.ValidatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) - _, err := c.Fake.Invokes(action, &admissionregistrationv1.ValidatingWebhookConfigurationList{}) + _, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{}) return err } // Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err + return obj.(*v1.ValidatingWebhookConfiguration), err } // Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { if validatingWebhookConfiguration == nil { return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } - return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err + return obj.(*v1.ValidatingWebhookConfiguration), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go index 2fed66b44da5..38d7713d9463 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" @@ -38,9 +37,9 @@ type FakeValidatingAdmissionPolicies struct { Fake *FakeAdmissionregistrationV1alpha1 } -var validatingadmissionpoliciesResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Resource: "validatingadmissionpolicies"} +var validatingadmissionpoliciesResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") -var validatingadmissionpoliciesKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicy"} +var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. 
func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go index 7c9cb88aab89..c520655f9dd1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" @@ -38,9 +37,9 @@ type FakeValidatingAdmissionPolicyBindings struct { Fake *FakeAdmissionregistrationV1alpha1 } -var validatingadmissionpolicybindingsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Resource: "validatingadmissionpolicybindings"} +var validatingadmissionpolicybindingsResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") -var validatingadmissionpolicybindingsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyBinding"} +var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index a06bb7db102f..9d85aff37fbb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" @@ -38,9 +37,9 @@ type FakeMutatingWebhookConfigurations struct { Fake *FakeAdmissionregistrationV1beta1 } -var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"} +var mutatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") -var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfiguration"} +var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. 
func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 27495b31de33..41e3a7c1ee8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" @@ -38,9 +37,9 @@ type FakeValidatingWebhookConfigurations struct { Fake *FakeAdmissionregistrationV1beta1 } -var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"} +var validatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") -var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfiguration"} +var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. 
func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go index 8b6a84d7a9bb..738c68038b57 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" @@ -38,9 +37,9 @@ type FakeStorageVersions struct { Fake *FakeInternalV1alpha1 } -var storageversionsResource = schema.GroupVersionResource{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Resource: "storageversions"} +var storageversionsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversions") -var storageversionsKind = schema.GroupVersionKind{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Kind: "StorageVersion"} +var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion") // Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index 1116d6dffc00..f691ba9acd1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeControllerRevisions struct { ns string } -var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"} +var controllerrevisionsResource = v1.SchemeGroupVersion.WithResource("controllerrevisions") -var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"} +var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision") // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. 
-func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{}) + Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1.ControllerRevision{}) if obj == nil { return nil, err } - return obj.(*appsv1.ControllerRevision), err + return obj.(*v1.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.ControllerRevisionList, err error) { +func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &appsv1.ControllerRevisionList{}) + Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1.ControllerRevisionList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) if label == nil { label = labels.Everything() } - list := &appsv1.ControllerRevisionList{ListMeta: obj.(*appsv1.ControllerRevisionList).ListMeta} - for _, item := range obj.(*appsv1.ControllerRevisionList).Items { + list := &v1.ControllerRevisionList{ListMeta: obj.(*v1.ControllerRevisionList).ListMeta} + for _, item := range obj.(*v1.ControllerRevisionList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.CreateOptions) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) + Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) if obj == nil { return nil, err } - return obj.(*appsv1.ControllerRevision), err + return obj.(*v1.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
-func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.UpdateOptions) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) + Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) if obj == nil { return nil, err } - return obj.(*appsv1.ControllerRevision), err + return obj.(*v1.ControllerRevision), err } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &appsv1.ControllerRevision{}) + Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1.ControllerRevision{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &appsv1.ControllerRevisionList{}) + _, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{}) return err } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &appsv1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1.ControllerRevision{}) if obj == nil { return nil, err } - return obj.(*appsv1.ControllerRevision), err + return obj.(*v1.ControllerRevision), err } // Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { if controllerRevision == nil { return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &appsv1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ControllerRevision{}) if obj == nil { return nil, err } - return obj.(*appsv1.ControllerRevision), err + return obj.(*v1.ControllerRevision), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 00c28df9ba8a..3e0df7235289 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeDaemonSets struct { ns string } -var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"} +var daemonsetsResource = v1.SchemeGroupVersion.WithResource("daemonsets") -var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"} +var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{}) + Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.DaemonSetList, err error) { +func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &appsv1.DaemonSetList{}) + Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1.DaemonSetList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &appsv1.DaemonSetList{ListMeta: obj.(*appsv1.DaemonSetList).ListMeta} - for _, item := range obj.(*appsv1.DaemonSetList).Items { + list := &v1.DaemonSetList{ListMeta: obj.(*v1.DaemonSetList).ListMeta} + for _, item := range obj.(*v1.DaemonSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.CreateOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) + Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) + Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (*appsv1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &appsv1.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // Delete takes name of the daemonSet and deletes it. 
Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &appsv1.DaemonSet{}) + Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1.DaemonSet{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &appsv1.DaemonSetList{}) + _, err := c.Fake.Invokes(action, &v1.DaemonSetList{}) return err } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &appsv1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { if daemonSet == nil { return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *applyconfiguratio return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &appsv1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { if daemonSet == nil { return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *applyconfig return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &appsv1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.DaemonSet), err + return obj.(*v1.DaemonSet), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 8899824aa584..da1896fe604c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -23,14 +23,13 @@ import ( json "encoding/json" "fmt" - appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" testing "k8s.io/client-go/testing" ) @@ -41,25 +40,25 @@ type FakeDeployments struct { ns string } -var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} +var deploymentsResource = v1.SchemeGroupVersion.WithResource("deployments") -var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"} +var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &appsv1.Deployment{}) + Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. 
-func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.DeploymentList, err error) { +func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &appsv1.DeploymentList{}) + Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1.DeploymentList{}) if obj == nil { return nil, err @@ -69,8 +68,8 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &appsv1.DeploymentList{ListMeta: obj.(*appsv1.DeploymentList).ListMeta} - for _, item := range obj.(*appsv1.DeploymentList).Items { + list := &v1.DeploymentList{ListMeta: obj.(*v1.DeploymentList).ListMeta} + for _, item := range obj.(*v1.DeploymentList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -79,75 +78,75 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *appsv1.Deployment, opts v1.CreateOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) + Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (*appsv1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &appsv1.Deployment{}) + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &appsv1.Deployment{}) + Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1.Deployment{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &appsv1.DeploymentList{}) + _, err := c.Fake.Invokes(action, &v1.DeploymentList{}) return err } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &appsv1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { if deployment == nil { return nil, fmt.Errorf("deployment provided to Apply must not be nil") } @@ -160,17 +159,17 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *applyconfigurat return nil, fmt.Errorf("deployment.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &appsv1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { if deployment == nil { return nil, fmt.Errorf("deployment provided to Apply must not be nil") } @@ -183,16 +182,16 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *applyconf return nil, fmt.Errorf("deployment.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &appsv1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Deployment{}) if obj == nil { return nil, err } - return obj.(*appsv1.Deployment), err + return obj.(*v1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) @@ -203,7 +202,7 @@ func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) @@ -215,7 +214,7 @@ func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. 
-func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 39acfafeb257..dedf19b42f7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -23,14 +23,13 @@ import ( json "encoding/json" "fmt" - appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" testing "k8s.io/client-go/testing" ) @@ -41,25 +40,25 @@ type FakeReplicaSets struct { ns string } -var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"} +var replicasetsResource = v1.SchemeGroupVersion.WithResource("replicasets") -var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"} +var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{}) + Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.ReplicaSetList, err error) { +func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &appsv1.ReplicaSetList{}) + Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1.ReplicaSetList{}) if obj == nil { return nil, err @@ -69,8 +68,8 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &appsv1.ReplicaSetList{ListMeta: obj.(*appsv1.ReplicaSetList).ListMeta} - for _, item := range obj.(*appsv1.ReplicaSetList).Items { + list := &v1.ReplicaSetList{ListMeta: obj.(*v1.ReplicaSetList).ListMeta} + for _, item := range obj.(*v1.ReplicaSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -79,75 +78,75 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.CreateOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) + Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) + Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (*appsv1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &appsv1.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &appsv1.ReplicaSet{}) + Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1.ReplicaSet{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &appsv1.ReplicaSetList{}) + _, err := c.Fake.Invokes(action, &v1.ReplicaSetList{}) return err } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &appsv1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { if replicaSet == nil { return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") } @@ -160,17 +159,17 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *applyconfigurat return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &appsv1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { if replicaSet == nil { return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") } @@ -183,16 +182,16 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *applyconf return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &appsv1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicaSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.ReplicaSet), err + return obj.(*v1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) @@ -203,7 +202,7 @@ func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) @@ -215,7 +214,7 @@ func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. 
-func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index e04eb025d4f2..f1d7d96e8d0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -23,14 +23,13 @@ import ( json "encoding/json" "fmt" - appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" testing "k8s.io/client-go/testing" ) @@ -41,25 +40,25 @@ type FakeStatefulSets struct { ns string } -var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"} +var statefulsetsResource = v1.SchemeGroupVersion.WithResource("statefulsets") -var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"} +var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{}) + Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.StatefulSetList, err error) { +func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &appsv1.StatefulSetList{}) + Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1.StatefulSetList{}) if obj == nil { return nil, err @@ -69,8 +68,8 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul if label == nil { label = labels.Everything() } - list := &appsv1.StatefulSetList{ListMeta: obj.(*appsv1.StatefulSetList).ListMeta} - for _, item := range obj.(*appsv1.StatefulSetList).Items { + list := &v1.StatefulSetList{ListMeta: obj.(*v1.StatefulSetList).ListMeta} + for _, item := range obj.(*v1.StatefulSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -79,75 +78,75 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.CreateOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) + Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) + Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (*appsv1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &appsv1.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &appsv1.StatefulSet{}) + Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1.StatefulSet{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &appsv1.StatefulSetList{}) + _, err := c.Fake.Invokes(action, &v1.StatefulSetList{}) return err } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &appsv1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { if statefulSet == nil { return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") } @@ -160,17 +159,17 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *applyconfigur return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &appsv1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { if statefulSet == nil { return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") } @@ -183,16 +182,16 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *applyco return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &appsv1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.StatefulSet{}) if obj == nil { return nil, err } - return obj.(*appsv1.StatefulSet), err + return obj.(*v1.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) @@ -203,7 +202,7 @@ func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) @@ -215,7 +214,7 @@ func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName stri // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. 
-func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index a4fb08c9036e..1954c9470350 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" @@ -39,9 +38,9 @@ type FakeControllerRevisions struct { ns string } -var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"} +var controllerrevisionsResource = v1beta1.SchemeGroupVersion.WithResource("controllerrevisions") -var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "ControllerRevision"} +var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRevision") // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 2db29b4814a1..9614852f74d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" @@ -39,9 +38,9 @@ type FakeDeployments struct { ns string } -var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"} +var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") -var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"} +var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index b756a3239c43..2124515cfed1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" @@ -39,9 +38,9 @@ type FakeStatefulSets struct { ns string } -var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"} +var statefulsetsResource = v1beta1.SchemeGroupVersion.WithResource("statefulsets") -var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "StatefulSet"} +var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index f3d71bf77e16..1bf7fb331490 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" @@ -39,9 +38,9 @@ type FakeControllerRevisions struct { ns string } -var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"} +var controllerrevisionsResource = v1beta2.SchemeGroupVersion.WithResource("controllerrevisions") -var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ControllerRevision"} +var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRevision") // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. 
func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 3ee0f7239c0c..8f5cfa5a8aa1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" @@ -39,9 +38,9 @@ type FakeDaemonSets struct { ns string } -var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "daemonsets"} +var daemonsetsResource = v1beta2.SchemeGroupVersion.WithResource("daemonsets") -var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "DaemonSet"} +var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index 38fb2a67c71b..c9e8ab48bb36 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" @@ -39,9 +38,9 @@ type FakeDeployments struct { ns string } -var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"} +var deploymentsResource = v1beta2.SchemeGroupVersion.WithResource("deployments") -var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} +var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 2fb89cee07e6..46e1a78a7af4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" @@ -39,9 +38,9 @@ type FakeReplicaSets struct { ns string } -var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "replicasets"} +var replicasetsResource = v1beta2.SchemeGroupVersion.WithResource("replicasets") -var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} +var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index cc9903c0f5d1..684f79925672 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" @@ -39,9 +38,9 @@ type FakeStatefulSets struct { ns string } -var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "statefulsets"} +var statefulsetsResource = v1beta2.SchemeGroupVersion.WithResource("statefulsets") -var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "StatefulSet"} +var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index b85fcfbb87d6..500e87d065fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeTokenReviews struct { Fake *FakeAuthenticationV1 } -var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1", Resource: "tokenreviews"} +var tokenreviewsResource = v1.SchemeGroupVersion.WithResource("tokenreviews") -var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1", Kind: "TokenReview"} +var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go index 6bb1c2508ed6..a20b3dd764bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go @@ -23,7 +23,6 @@ import ( v1alpha1 "k8s.io/api/authentication/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSelfSubjectReviews struct { Fake *FakeAuthenticationV1alpha1 } -var selfsubjectreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1alpha1", Resource: "selfsubjectreviews"} +var selfsubjectreviewsResource = v1alpha1.SchemeGroupVersion.WithResource("selfsubjectreviews") -var selfsubjectreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1alpha1", Kind: "SelfSubjectReview"} +var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectReview") // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. 
func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index 0da3ec6f437f..b1988a67a39c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -23,7 +23,6 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeTokenReviews struct { Fake *FakeAuthenticationV1beta1 } -var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1beta1", Resource: "tokenreviews"} +var tokenreviewsResource = v1beta1.SchemeGroupVersion.WithResource("tokenreviews") -var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1beta1", Kind: "TokenReview"} +var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index d74ae0a474df..43ea05328c8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -33,9 +32,9 @@ type FakeLocalSubjectAccessReviews struct { ns string } -var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "localsubjectaccessreviews"} +var localsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") -var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "LocalSubjectAccessReview"} +var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. 
func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index 80ebbbd45f34..27642266d6af 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSelfSubjectAccessReviews struct { Fake *FakeAuthorizationV1 } -var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "selfsubjectaccessreviews"} +var selfsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") -var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectAccessReview"} +var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index dd70908ad31f..cd6c682d16b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSelfSubjectRulesReviews struct { Fake *FakeAuthorizationV1 } -var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "selfsubjectrulesreviews"} +var selfsubjectrulesreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") -var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectRulesReview"} +var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. 
func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index b480b2b41873..09dab6480769 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSubjectAccessReviews struct { Fake *FakeAuthorizationV1 } -var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "subjectaccessreviews"} +var subjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("subjectaccessreviews") -var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SubjectAccessReview"} +var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessReview") // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index 2d3ba4462832..104e979d19e4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -33,9 +32,9 @@ type FakeLocalSubjectAccessReviews struct { ns string } -var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "localsubjectaccessreviews"} +var localsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") -var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "LocalSubjectAccessReview"} +var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. 
func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index febe90c77a08..517e48b76077 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSelfSubjectAccessReviews struct { Fake *FakeAuthorizationV1beta1 } -var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectaccessreviews"} +var selfsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") -var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectAccessReview"} +var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 02df06012a7d..3aed050fcf58 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -23,7 +23,6 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSelfSubjectRulesReviews struct { Fake *FakeAuthorizationV1beta1 } -var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectrulesreviews"} +var selfsubjectrulesreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") -var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectRulesReview"} +var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. 
func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index b5be913c4bba..e9bfa521a299 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -23,7 +23,6 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -32,9 +31,9 @@ type FakeSubjectAccessReviews struct { Fake *FakeAuthorizationV1beta1 } -var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "subjectaccessreviews"} +var subjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("subjectaccessreviews") -var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SubjectAccessReview"} +var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAccessReview") // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index dcd47480ba47..a2c95b753957 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeHorizontalPodAutoscalers struct { ns string } -var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"} +var horizontalpodautoscalersResource = v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") -var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"} +var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") // Get takes name of the horizontalPodAutoscaler, and 
returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *autoscalingv1.HorizontalPodAutoscalerList, err error) { +func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &autoscalingv1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt if label == nil { label = labels.Everything() } - list := &autoscalingv1.HorizontalPodAutoscalerList{ListMeta: obj.(*autoscalingv1.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*autoscalingv1.HorizontalPodAutoscalerList).Items { + list := &v1.HorizontalPodAutoscalerList{ListMeta: obj.(*v1.HorizontalPodAutoscalerList).ListMeta} + for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v1.HorizontalPodAutoscaler{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &autoscalingv1.HorizontalPodAutoscalerList{}) + _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) return err } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { if horizontalPodAutoscaler == nil { return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { if horizontalPodAutoscaler == nil { return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*autoscalingv1.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go index ca4b24704dba..cfcc20823240 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go @@ -26,7 +26,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" @@ -39,9 +38,9 @@ type FakeHorizontalPodAutoscalers struct { ns string } -var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2", Resource: "horizontalpodautoscalers"} +var horizontalpodautoscalersResource = v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") -var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} +var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 2a750abddfc5..0b2658e642fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -26,7 +26,6 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" @@ -39,9 +38,9 @@ type FakeHorizontalPodAutoscalers struct { ns string } -var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"} +var horizontalpodautoscalersResource = v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") -var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"} +var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index 00d642d85673..0a7c93c3d3ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -26,7 +26,6 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" @@ -39,9 +38,9 @@ type FakeHorizontalPodAutoscalers struct { ns string } -var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta2", Resource: "horizontalpodautoscalers"} +var horizontalpodautoscalersResource = v2beta2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") -var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"} +var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
 func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
index 39f4ca0232d7..0cbcce6d8138 100644
--- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
+++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
@@ -23,13 +23,12 @@ import (
 	json "encoding/json"
 	"fmt"
 
-	batchv1 "k8s.io/api/batch/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/api/batch/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
+	batchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
 	testing "k8s.io/client-go/testing"
 )
 
@@ -39,25 +38,25 @@ type FakeCronJobs struct {
 	ns string
 }
 
-var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "cronjobs"}
+var cronjobsResource = v1.SchemeGroupVersion.WithResource("cronjobs")
 
-var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "CronJob"}
+var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob")
 
 // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
-func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *batchv1.CronJob, err error) {
+func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &batchv1.CronJob{})
+		Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1.CronJob{})
 
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*batchv1.CronJob), err
+	return obj.(*v1.CronJob), err
 }
 
 // List takes label and field selectors, and returns the list of CronJobs that match those selectors.
-func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *batchv1.CronJobList, err error) {
+func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &batchv1.CronJobList{})
+		Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1.CronJobList{})
 
 	if obj == nil {
 		return nil, err
@@ -67,8 +66,8 @@ func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *b
 	if label == nil {
 		label = labels.Everything()
 	}
-	list := &batchv1.CronJobList{ListMeta: obj.(*batchv1.CronJobList).ListMeta}
-	for _, item := range obj.(*batchv1.CronJobList).Items {
+	list := &v1.CronJobList{ListMeta: obj.(*v1.CronJobList).ListMeta}
+	for _, item := range obj.(*v1.CronJobList).Items {
 		if label.Matches(labels.Set(item.Labels)) {
 			list.Items = append(list.Items, item)
 		}
@@ -77,75 +76,75 @@ func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *b
 }
 
 // Watch returns a watch.Interface that watches the requested cronJobs.
-func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *batchv1.CronJob, opts v1.CreateOptions) (result *batchv1.CronJob, err error) { +func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &batchv1.CronJob{}) + Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *batchv1.CronJob, opts v1.UpdateOptions) (result *batchv1.CronJob, err error) { +func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &batchv1.CronJob{}) + Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *batchv1.CronJob, opts v1.UpdateOptions) (*batchv1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &batchv1.CronJob{}) + Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &batchv1.CronJob{}) + Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1.CronJob{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &batchv1.CronJobList{}) + _, err := c.Fake.Invokes(action, &v1.CronJobList{}) return err } // Patch applies the patch and returns the patched cronJob. 
-func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1.CronJob, err error) { +func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &batchv1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } // Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1.CronJob, err error) { +func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { if cronJob == nil { return nil, fmt.Errorf("cronJob provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *applyconfigurationsba return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &batchv1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1.CronJob, err error) { +func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { if cronJob == nil { return nil, fmt.Errorf("cronJob provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *applyconfigurat return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &batchv1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.CronJob{}) if obj == nil { return nil, err } - return obj.(*batchv1.CronJob), err + return obj.(*v1.CronJob), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index c44cddb3c0ec..cf1a913bdf09 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeJobs struct { ns string } -var jobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} +var jobsResource = v1.SchemeGroupVersion.WithResource("jobs") -var jobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"} +var jobsKind = v1.SchemeGroupVersion.WithKind("Job") // Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *FakeJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(jobsResource, c.ns, name), &batchv1.Job{}) + Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *FakeJobs) List(ctx context.Context, opts v1.ListOptions) (result *batchv1.JobList, err error) { +func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &batchv1.JobList{}) + Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &v1.JobList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeJobs) List(ctx context.Context, opts v1.ListOptions) (result *batch if label == nil { label = labels.Everything() } - list := &batchv1.JobList{ListMeta: obj.(*batchv1.JobList).ListMeta} - for _, item := range obj.(*batchv1.JobList).Items { + list := &v1.JobList{ListMeta: obj.(*v1.JobList).ListMeta} + for _, item := range obj.(*v1.JobList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeJobs) List(ctx context.Context, opts v1.ListOptions) (result *batch } // Watch returns a watch.Interface that watches the requested jobs. 
-func (c *FakeJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Create(ctx context.Context, job *batchv1.Job, opts v1.CreateOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &batchv1.Job{}) + Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Update(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &batchv1.Job{}) + Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeJobs) UpdateStatus(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (*batchv1.Job, error) { +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batchv1.Job{}) + Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *FakeJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(jobsResource, c.ns, name, opts), &batchv1.Job{}) + Invokes(testing.NewDeleteActionWithOptions(jobsResource, c.ns, name, opts), &v1.Job{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &batchv1.JobList{}) + _, err := c.Fake.Invokes(action, &v1.JobList{}) return err } // Patch applies the patch and returns the patched job. 
-func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1.Job, err error) { +func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &batchv1.Job{}) + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *FakeJobs) Apply(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { if job == nil { return nil, fmt.Errorf("job provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeJobs) Apply(ctx context.Context, job *applyconfigurationsbatchv1.Jo return nil, fmt.Errorf("job.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &batchv1.Job{}) + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeJobs) ApplyStatus(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { if job == nil { return nil, fmt.Errorf("job provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeJobs) ApplyStatus(ctx context.Context, job *applyconfigurationsbatc return nil, fmt.Errorf("job.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &batchv1.Job{}) + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*batchv1.Job), err + return obj.(*v1.Job), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index a8bad764f585..9d078f55a9fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" @@ -39,9 +38,9 @@ type FakeCronJobs struct { ns string } -var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} +var cronjobsResource = v1beta1.SchemeGroupVersion.WithResource("cronjobs") -var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"} +var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob") // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go index 90a1e6dc66ab..adb7db0bf680 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - certificatesv1 "k8s.io/api/certificates/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/certificates/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeCertificateSigningRequests struct { Fake *FakeCertificatesV1 } -var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1", Resource: "certificatesigningrequests"} +var certificatesigningrequestsResource = v1.SchemeGroupVersion.WithResource("certificatesigningrequests") -var certificatesigningrequestsKind = schema.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"} +var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("CertificateSigningRequest") // Get takes name of the certificateSigningRequest, and returns the 
corresponding certificateSigningRequest object, and an error if there is any. -func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *certificatesv1.CertificateSigningRequestList, err error) { +func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &certificatesv1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1.CertificateSigningRequestList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListO if label == nil { label = labels.Everything() } - list := &certificatesv1.CertificateSigningRequestList{ListMeta: obj.(*certificatesv1.CertificateSigningRequestList).ListMeta} - for _, item := range obj.(*certificatesv1.CertificateSigningRequestList).Items { + list := &v1.CertificateSigningRequestList{ListMeta: obj.(*v1.CertificateSigningRequestList).ListMeta} + for _, item := range obj.(*v1.CertificateSigningRequestList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,69 +73,69 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListO } // Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts v1.CreateOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts v1.UpdateOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1.CertificateSigningRequest{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) - _, err := c.Fake.Invokes(action, &certificatesv1.CertificateSigningRequestList{}) + _, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{}) return err } // Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { if certificateSigningRequest == nil { return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") } @@ -149,16 +148,16 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { if certificateSigningRequest == nil { return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") } @@ -171,19 +170,19 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } // UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts v1.UpdateOptions) (result *certificatesv1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &v1.CertificateSigningRequest{}) if obj == nil { return nil, err } - return obj.(*certificatesv1.CertificateSigningRequest), err + return obj.(*v1.CertificateSigningRequest), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 5a416150a88f..76bb38e7bf7c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" @@ -38,9 +37,9 @@ type FakeCertificateSigningRequests struct { Fake *FakeCertificatesV1beta1 } -var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"} +var certificatesigningrequestsResource = v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests") -var certificatesigningrequestsKind = schema.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"} +var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest") // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. 
func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index c52c828ef4f4..6dc7c4c17fd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - coordinationv1 "k8s.io/api/coordination/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeLeases struct { ns string } -var leasesResource = schema.GroupVersionResource{Group: "coordination.k8s.io", Version: "v1", Resource: "leases"} +var leasesResource = v1.SchemeGroupVersion.WithResource("leases") -var leasesKind = schema.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1", Kind: "Lease"} +var leasesKind = v1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &coordinationv1.Lease{}) + Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1.Lease{}) if obj == nil { return nil, err } - return obj.(*coordinationv1.Lease), err + return obj.(*v1.Lease), err } // List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *coordinationv1.LeaseList, err error) { +func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &coordinationv1.LeaseList{}) + Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1.LeaseList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *coo if label == nil { label = labels.Everything() } - list := &coordinationv1.LeaseList{ListMeta: obj.(*coordinationv1.LeaseList).ListMeta} - for _, item := range obj.(*coordinationv1.LeaseList).Items { + list := &v1.LeaseList{ListMeta: obj.(*v1.LeaseList).ListMeta} + for _, item := range obj.(*v1.LeaseList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *coo } // Watch returns a watch.Interface that watches the requested leases. 
-func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *coordinationv1.Lease, opts v1.CreateOptions) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) + Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1.Lease{}) if obj == nil { return nil, err } - return obj.(*coordinationv1.Lease), err + return obj.(*v1.Lease), err } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *coordinationv1.Lease, opts v1.UpdateOptions) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) + Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1.Lease{}) if obj == nil { return nil, err } - return obj.(*coordinationv1.Lease), err + return obj.(*v1.Lease), err } // Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &coordinationv1.Lease{}) + Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1.Lease{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &coordinationv1.LeaseList{}) + _, err := c.Fake.Invokes(action, &v1.LeaseList{}) return err } // Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &coordinationv1.Lease{}) + Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1.Lease{}) if obj == nil { return nil, err } - return obj.(*coordinationv1.Lease), err + return obj.(*v1.Lease), err } // Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *FakeLeases) Apply(ctx context.Context, lease *applyconfigurationscoordinationv1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { if lease == nil { return nil, fmt.Errorf("lease provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *applyconfigurationscoordi return nil, fmt.Errorf("lease.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &coordinationv1.Lease{}) + Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Lease{}) if obj == nil { return nil, err } - return obj.(*coordinationv1.Lease), err + return obj.(*v1.Lease), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 15b6c401dcaa..9a4a0d7eb93d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" @@ -39,9 +38,9 @@ type FakeLeases struct { ns string } -var leasesResource = schema.GroupVersionResource{Group: "coordination.k8s.io", Version: "v1beta1", Resource: "leases"} +var leasesResource = v1beta1.SchemeGroupVersion.WithResource("leases") -var leasesKind = schema.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1beta1", Kind: "Lease"} +var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. 
func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 7e5c02daacc8..39d4c3282e33 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeComponentStatuses struct { Fake *FakeCoreV1 } -var componentstatusesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"} +var componentstatusesResource = v1.SchemeGroupVersion.WithResource("componentstatuses") -var componentstatusesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ComponentStatus"} +var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus") // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(componentstatusesResource, name), &corev1.ComponentStatus{}) + Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*corev1.ComponentStatus), err + return obj.(*v1.ComponentStatus), err } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *FakeComponentStatuses) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ComponentStatusList, err error) { +func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &corev1.ComponentStatusList{}) + Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &v1.ComponentStatusList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeComponentStatuses) List(ctx context.Context, opts v1.ListOptions) ( if label == nil { label = labels.Everything() } - list := &corev1.ComponentStatusList{ListMeta: obj.(*corev1.ComponentStatusList).ListMeta} - for _, item := range obj.(*corev1.ComponentStatusList).Items { + list := &v1.ComponentStatusList{ListMeta: obj.(*v1.ComponentStatusList).ListMeta} + for _, item := range obj.(*v1.ComponentStatusList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeComponentStatuses) List(ctx context.Context, opts v1.ListOptions) ( } // Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.CreateOptions) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) + Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*corev1.ComponentStatus), err + return obj.(*v1.ComponentStatus), err } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.UpdateOptions) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) + Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*corev1.ComponentStatus), err + return obj.(*v1.ComponentStatus), err } // Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(componentstatusesResource, name, opts), &corev1.ComponentStatus{}) + Invokes(testing.NewRootDeleteActionWithOptions(componentstatusesResource, name, opts), &v1.ComponentStatus{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOpts) - _, err := c.Fake.Invokes(action, &corev1.ComponentStatusList{}) + _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) return err } // Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &corev1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*corev1.ComponentStatus), err + return obj.(*v1.ComponentStatus), err } // Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *applyconfigurationscorev1.ComponentStatusApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { if componentStatus == nil { return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *appl return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &corev1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*corev1.ComponentStatus), err + return obj.(*v1.ComponentStatus), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index b74b376a92a0..6e8a38bd8fd3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeConfigMaps struct { ns string } -var configmapsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} +var configmapsResource = v1.SchemeGroupVersion.WithResource("configmaps") -var configmapsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} +var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap") // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *FakeConfigMaps) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &corev1.ConfigMap{}) + Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*corev1.ConfigMap), err + return obj.(*v1.ConfigMap), err } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *FakeConfigMaps) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ConfigMapList, err error) { +func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &corev1.ConfigMapList{}) + Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &v1.ConfigMapList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeConfigMaps) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &corev1.ConfigMapList{ListMeta: obj.(*corev1.ConfigMapList).ListMeta} - for _, item := range obj.(*corev1.ConfigMapList).Items { + list := &v1.ConfigMapList{ListMeta: obj.(*v1.ConfigMapList).ListMeta} + for _, item := range obj.(*v1.ConfigMapList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeConfigMaps) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Create(ctx context.Context, configMap *corev1.ConfigMap, opts v1.CreateOptions) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) + Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*corev1.ConfigMap), err + return obj.(*v1.ConfigMap), err } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Update(ctx context.Context, configMap *corev1.ConfigMap, opts v1.UpdateOptions) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) + Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*corev1.ConfigMap), err + return obj.(*v1.ConfigMap), err } // Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(configmapsResource, c.ns, name, opts), &corev1.ConfigMap{}) + Invokes(testing.NewDeleteActionWithOptions(configmapsResource, c.ns, name, opts), &v1.ConfigMap{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.ConfigMapList{}) + _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) return err } // Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &corev1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*corev1.ConfigMap), err + return obj.(*v1.ConfigMap), err } // Apply takes the given apply declarative configuration, applies it and returns the applied configMap. -func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *applyconfigurationscorev1.ConfigMapApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { if configMap == nil { return nil, fmt.Errorf("configMap provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *applyconfiguratio return nil, fmt.Errorf("configMap.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*corev1.ConfigMap), err + return obj.(*v1.ConfigMap), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index e9a515a1af28..6b2f6c249e31 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeEndpoints struct { ns string } -var endpointsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} +var endpointsResource = v1.SchemeGroupVersion.WithResource("endpoints") -var endpointsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"} +var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *FakeEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &corev1.Endpoints{}) + Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*corev1.Endpoints), err + return obj.(*v1.Endpoints), err } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *FakeEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *corev1.EndpointsList, err error) { +func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &corev1.EndpointsList{}) + Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeEndpoints) List(ctx context.Context, opts v1.ListOptions) (result * if label == nil { label = labels.Everything() } - list := &corev1.EndpointsList{ListMeta: obj.(*corev1.EndpointsList).ListMeta} - for _, item := range obj.(*corev1.EndpointsList).Items { + list := &v1.EndpointsList{ListMeta: obj.(*v1.EndpointsList).ListMeta} + for _, item := range obj.(*v1.EndpointsList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeEndpoints) List(ctx context.Context, opts v1.ListOptions) (result * } // Watch returns a watch.Interface that watches the requested endpoints. -func (c *FakeEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Create(ctx context.Context, endpoints *corev1.Endpoints, opts v1.CreateOptions) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) + Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*corev1.Endpoints), err + return obj.(*v1.Endpoints), err } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Update(ctx context.Context, endpoints *corev1.Endpoints, opts v1.UpdateOptions) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) + Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*corev1.Endpoints), err + return obj.(*v1.Endpoints), err } // Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &corev1.Endpoints{}) + Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &v1.Endpoints{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.EndpointsList{}) + _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) return err } // Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &corev1.Endpoints{}) + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*corev1.Endpoints), err + return obj.(*v1.Endpoints), err } // Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *applyconfigurationscorev1.EndpointsApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { if endpoints == nil { return nil, fmt.Errorf("endpoints provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *applyconfiguration return nil, fmt.Errorf("endpoints.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.Endpoints{}) + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*corev1.Endpoints), err + return obj.(*v1.Endpoints), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 9518839431a7..9ad879b3943e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeEvents struct { ns string } -var eventsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "events"} +var eventsResource = v1.SchemeGroupVersion.WithResource("events") -var eventsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Event"} +var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Event, err error) { +func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &corev1.Event{}) + Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*corev1.Event), err + return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *corev1.EventList, err error) { +func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &corev1.EventList{}) + Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *cor if label == nil { label = labels.Everything() } - list := &corev1.EventList{ListMeta: obj.(*corev1.EventList).ListMeta} - for _, item := range obj.(*corev1.EventList).Items { + list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} + for _, item := range obj.(*v1.EventList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *cor } // Watch returns a watch.Interface that watches the requested events. 
-func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *corev1.Event, opts v1.CreateOptions) (result *corev1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &corev1.Event{}) + Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*corev1.Event), err + return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *corev1.Event, opts v1.UpdateOptions) (result *corev1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &corev1.Event{}) + Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*corev1.Event), err + return obj.(*v1.Event), err } // Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &corev1.Event{}) + Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.EventList{}) + _, err := c.Fake.Invokes(action, &v1.EventList{}) return err } // Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &corev1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*corev1.Event), err + return obj.(*v1.Event), err } // Apply takes the given apply declarative configuration, applies it and returns the applied event. 
-func (c *FakeEvents) Apply(ctx context.Context, event *applyconfigurationscorev1.EventApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Event, err error) { +func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { if event == nil { return nil, fmt.Errorf("event provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeEvents) Apply(ctx context.Context, event *applyconfigurationscorev1 return nil, fmt.Errorf("event.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*corev1.Event), err + return obj.(*v1.Event), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 7487285e9106..f18b5741c386 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeLimitRanges struct { ns string } -var limitrangesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"} +var limitrangesResource = v1.SchemeGroupVersion.WithResource("limitranges") -var limitrangesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "LimitRange"} +var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange") // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *FakeLimitRanges) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &corev1.LimitRange{}) + Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*corev1.LimitRange), err + return obj.(*v1.LimitRange), err } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *FakeLimitRanges) List(ctx context.Context, opts v1.ListOptions) (result *corev1.LimitRangeList, err error) { +func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &corev1.LimitRangeList{}) + Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &v1.LimitRangeList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeLimitRanges) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &corev1.LimitRangeList{ListMeta: obj.(*corev1.LimitRangeList).ListMeta} - for _, item := range obj.(*corev1.LimitRangeList).Items { + list := &v1.LimitRangeList{ListMeta: obj.(*v1.LimitRangeList).ListMeta} + for _, item := range obj.(*v1.LimitRangeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeLimitRanges) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested limitRanges. -func (c *FakeLimitRanges) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *corev1.LimitRange, opts v1.CreateOptions) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) + Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*corev1.LimitRange), err + return obj.(*v1.LimitRange), err } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *corev1.LimitRange, opts v1.UpdateOptions) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) + Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*corev1.LimitRange), err + return obj.(*v1.LimitRange), err } // Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(limitrangesResource, c.ns, name, opts), &corev1.LimitRange{}) + Invokes(testing.NewDeleteActionWithOptions(limitrangesResource, c.ns, name, opts), &v1.LimitRange{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.LimitRangeList{}) + _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) return err } // Patch applies the patch and returns the patched limitRange. -func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &corev1.LimitRange{}) + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*corev1.LimitRange), err + return obj.(*v1.LimitRange), err } // Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *applyconfigurationscorev1.LimitRangeApplyConfiguration, opts v1.ApplyOptions) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { if limitRange == nil { return nil, fmt.Errorf("limitRange provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *applyconfigurat return nil, fmt.Errorf("limitRange.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &corev1.LimitRange{}) + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*corev1.LimitRange), err + return obj.(*v1.LimitRange), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 83ada9f72359..52fcff591e94 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeNamespaces struct { Fake *FakeCoreV1 } -var namespacesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} +var namespacesResource = v1.SchemeGroupVersion.WithResource("namespaces") -var namespacesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} +var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *FakeNamespaces) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(namespacesResource, name), &corev1.Namespace{}) + Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *FakeNamespaces) List(ctx context.Context, opts v1.ListOptions) (result *corev1.NamespaceList, err error) { +func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &corev1.NamespaceList{}) + Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeNamespaces) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &corev1.NamespaceList{ListMeta: obj.(*corev1.NamespaceList).ListMeta} - for _, item := range obj.(*corev1.NamespaceList).Items { + list := &v1.NamespaceList{ListMeta: obj.(*v1.NamespaceList).ListMeta} + for _, item := range obj.(*v1.NamespaceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,61 +73,61 @@ func (c *FakeNamespaces) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested namespaces. -func (c *FakeNamespaces) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Create(ctx context.Context, namespace *corev1.Namespace, opts v1.CreateOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &corev1.Namespace{}) + Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Update(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &corev1.Namespace{}) + Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (*corev1.Namespace, error) { +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &corev1.Namespace{}) + Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // Delete takes name of the namespace and deletes it. 
Returns an error if one occurs. -func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &corev1.Namespace{}) + Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &v1.Namespace{}) return err } // Patch applies the patch and returns the patched namespace. -func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &corev1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *FakeNamespaces) Apply(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { if namespace == nil { return nil, fmt.Errorf("namespace provided to Apply must not be nil") } @@ -141,16 +140,16 @@ func (c *FakeNamespaces) Apply(ctx context.Context, namespace *applyconfiguratio return nil, fmt.Errorf("namespace.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &corev1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { if namespace == nil { return nil, fmt.Errorf("namespace provided to Apply must not be nil") } @@ -163,9 +162,9 @@ func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *applyconfig return nil, fmt.Errorf("namespace.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &corev1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*corev1.Namespace), err + return obj.(*v1.Namespace), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 82816deb449b..5df40f8d1133 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeNodes struct { Fake *FakeCoreV1 } -var nodesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"} +var nodesResource = v1.SchemeGroupVersion.WithResource("nodes") -var nodesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} +var nodesKind = v1.SchemeGroupVersion.WithKind("Node") // Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *FakeNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(nodesResource, name), &corev1.Node{}) + Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *FakeNodes) List(ctx context.Context, opts v1.ListOptions) (result *corev1.NodeList, err error) { +func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &corev1.NodeList{}) + Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeNodes) List(ctx context.Context, opts v1.ListOptions) (result *core if label == nil { label = labels.Everything() } - list := &corev1.NodeList{ListMeta: obj.(*corev1.NodeList).ListMeta} - for _, item := range obj.(*corev1.NodeList).Items { + list := &v1.NodeList{ListMeta: obj.(*v1.NodeList).ListMeta} + for _, item := range obj.(*v1.NodeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,69 +73,69 @@ func (c *FakeNodes) List(ctx context.Context, opts v1.ListOptions) (result *core } // Watch returns a watch.Interface that watches the requested nodes. 
-func (c *FakeNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Create(ctx context.Context, node *corev1.Node, opts v1.CreateOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(nodesResource, node), &corev1.Node{}) + Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Update(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(nodesResource, node), &corev1.Node{}) + Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (*corev1.Node, error) { +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &corev1.Node{}) + Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *FakeNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &corev1.Node{}) + Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &v1.Node{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts) - _, err := c.Fake.Invokes(action, &corev1.NodeList{}) + _, err := c.Fake.Invokes(action, &v1.NodeList{}) return err } // Patch applies the patch and returns the patched node. 
-func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Node, err error) { +func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &corev1.Node{}) + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // Apply takes the given apply declarative configuration, applies it and returns the applied node. -func (c *FakeNodes) Apply(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { if node == nil { return nil, fmt.Errorf("node provided to Apply must not be nil") } @@ -149,16 +148,16 @@ func (c *FakeNodes) Apply(ctx context.Context, node *applyconfigurationscorev1.N return nil, fmt.Errorf("node.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &corev1.Node{}) + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNodes) ApplyStatus(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { if node == nil { return nil, fmt.Errorf("node provided to Apply must not be nil") } @@ -171,9 +170,9 @@ func (c *FakeNodes) ApplyStatus(ctx context.Context, node *applyconfigurationsco return nil, fmt.Errorf("node.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &corev1.Node{}) + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &v1.Node{}) if obj == nil { return nil, err } - return obj.(*corev1.Node), err + return obj.(*v1.Node), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index d071a45dbb71..5b06d0b19235 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakePersistentVolumes struct { Fake *FakeCoreV1 } -var persistentvolumesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"} +var persistentvolumesResource = v1.SchemeGroupVersion.WithResource("persistentvolumes") -var persistentvolumesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolume"} +var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume") // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &corev1.PersistentVolume{}) + Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *FakePersistentVolumes) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PersistentVolumeList, err error) { +func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &corev1.PersistentVolumeList{}) + Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &v1.PersistentVolumeList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakePersistentVolumes) List(ctx context.Context, opts v1.ListOptions) ( if label == nil { label = labels.Everything() } - list := &corev1.PersistentVolumeList{ListMeta: obj.(*corev1.PersistentVolumeList).ListMeta} - for _, item := range obj.(*corev1.PersistentVolumeList).Items { + list := &v1.PersistentVolumeList{ListMeta: obj.(*v1.PersistentVolumeList).ListMeta} + for _, item := range obj.(*v1.PersistentVolumeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,69 +73,69 @@ func (c *FakePersistentVolumes) List(ctx context.Context, opts v1.ListOptions) ( } // Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *FakePersistentVolumes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.CreateOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) + Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) + Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (*corev1.PersistentVolume, error) { +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &corev1.PersistentVolume{}) + Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(persistentvolumesResource, name, opts), &corev1.PersistentVolume{}) + Invokes(testing.NewRootDeleteActionWithOptions(persistentvolumesResource, name, opts), &v1.PersistentVolume{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOpts) - _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeList{}) + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) return err } // Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &corev1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. -func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts v1.ApplyOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { if persistentVolume == nil { return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") } @@ -149,16 +148,16 @@ func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *app return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &corev1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts v1.ApplyOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { if persistentVolume == nil { return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") } @@ -171,9 +170,9 @@ func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolum return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &corev1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolume), err + return obj.(*v1.PersistentVolume), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index 6e4cef26023e..b860e536744d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakePersistentVolumeClaims struct { ns string } -var persistentvolumeclaimsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"} +var persistentvolumeclaimsResource = v1.SchemeGroupVersion.WithResource("persistentvolumeclaims") -var persistentvolumeclaimsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolumeClaim"} +var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolumeClaim") // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. 
-func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PersistentVolumeClaimList, err error) { +func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &corev1.PersistentVolumeClaimList{}) + Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &v1.PersistentVolumeClaimList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts v1.ListOptio if label == nil { label = labels.Everything() } - list := &corev1.PersistentVolumeClaimList{ListMeta: obj.(*corev1.PersistentVolumeClaimList).ListMeta} - for _, item := range obj.(*corev1.PersistentVolumeClaimList).Items { + list := &v1.PersistentVolumeClaimList{ListMeta: obj.(*v1.PersistentVolumeClaimList).ListMeta} + for _, item := range obj.(*v1.PersistentVolumeClaimList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts v1.ListOptio } // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.CreateOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // Update takes the representation of a persistentVolumeClaim and updates it. 
Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) { +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(persistentvolumeclaimsResource, c.ns, name, opts), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewDeleteActionWithOptions(persistentvolumeclaimsResource, c.ns, name, opts), &v1.PersistentVolumeClaim{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeClaimList{}) + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) return err } // Patch applies the patch and returns the patched persistentVolumeClaim. 
-func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts v1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { if persistentVolumeClaim == nil { return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolume return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts v1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { if persistentVolumeClaim == nil { return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistent return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*corev1.PersistentVolumeClaim), err + return obj.(*v1.PersistentVolumeClaim), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 38a147955017..23634c7d0714 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakePods struct { ns string } -var podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} +var podsResource = v1.SchemeGroupVersion.WithResource("pods") -var podsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} +var podsKind = v1.SchemeGroupVersion.WithKind("Pod") // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *FakePods) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Pod, err error) { +func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(podsResource, c.ns, name), &corev1.Pod{}) + Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *FakePods) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PodList, err error) { +func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &corev1.PodList{}) + Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakePods) List(ctx context.Context, opts v1.ListOptions) (result *corev if label == nil { label = labels.Everything() } - list := &corev1.PodList{ListMeta: obj.(*corev1.PodList).ListMeta} - for _, item := range obj.(*corev1.PodList).Items { + list := &v1.PodList{ListMeta: obj.(*v1.PodList).ListMeta} + for _, item := range obj.(*v1.PodList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakePods) List(ctx context.Context, opts v1.ListOptions) (result *corev } // Watch returns a watch.Interface that watches the requested pods. 
-func (c *FakePods) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Create(ctx context.Context, pod *corev1.Pod, opts v1.CreateOptions) (result *corev1.Pod, err error) { +func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &corev1.Pod{}) + Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Update(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (result *corev1.Pod, err error) { +func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &corev1.Pod{}) + Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (*corev1.Pod, error) { +func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &corev1.Pod{}) + Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *FakePods) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &corev1.Pod{}) + Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &v1.Pod{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePods) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.PodList{}) + _, err := c.Fake.Invokes(action, &v1.PodList{}) return err } // Patch applies the patch and returns the patched pod. 
-func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Pod, err error) { +func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &corev1.Pod{}) + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *FakePods) Apply(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Pod, err error) { +func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { if pod == nil { return nil, fmt.Errorf("pod provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakePods) Apply(ctx context.Context, pod *applyconfigurationscorev1.Pod return nil, fmt.Errorf("pod.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.Pod{}) + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePods) ApplyStatus(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Pod, err error) { +func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { if pod == nil { return nil, fmt.Errorf("pod provided to Apply must not be nil") } @@ -181,21 +180,21 @@ func (c *FakePods) ApplyStatus(ctx context.Context, pod *applyconfigurationscore return nil, fmt.Errorf("pod.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &corev1.Pod{}) + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } // UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts v1.UpdateOptions) (result *corev1.Pod, err error) { +func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &corev1.Pod{}) + Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{}) if obj == nil { return nil, err } - return obj.(*corev1.Pod), err + return obj.(*v1.Pod), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 00711f36fb30..9fa97ab402d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakePodTemplates struct { ns string } -var podtemplatesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"} +var podtemplatesResource = v1.SchemeGroupVersion.WithResource("podtemplates") -var podtemplatesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodTemplate"} +var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate") // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *FakePodTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{}) + Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*corev1.PodTemplate), err + return obj.(*v1.PodTemplate), err } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *FakePodTemplates) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PodTemplateList, err error) { +func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &corev1.PodTemplateList{}) + Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &v1.PodTemplateList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakePodTemplates) List(ctx context.Context, opts v1.ListOptions) (resul if label == nil { label = labels.Everything() } - list := &corev1.PodTemplateList{ListMeta: obj.(*corev1.PodTemplateList).ListMeta} - for _, item := range obj.(*corev1.PodTemplateList).Items { + list := &v1.PodTemplateList{ListMeta: obj.(*v1.PodTemplateList).ListMeta} + for _, item := range obj.(*v1.PodTemplateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakePodTemplates) List(ctx context.Context, opts v1.ListOptions) (resul } // Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.CreateOptions) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) + Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*corev1.PodTemplate), err + return obj.(*v1.PodTemplate), err } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.UpdateOptions) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) + Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*corev1.PodTemplate), err + return obj.(*v1.PodTemplate), err } // Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podtemplatesResource, c.ns, name, opts), &corev1.PodTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(podtemplatesResource, c.ns, name, opts), &v1.PodTemplate{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.PodTemplateList{}) + _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) return err } // Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &corev1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*corev1.PodTemplate), err + return obj.(*v1.PodTemplate), err } // Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *applyconfigurationscorev1.PodTemplateApplyConfiguration, opts v1.ApplyOptions) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { if podTemplate == nil { return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *applyconfigur return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &corev1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*corev1.PodTemplate), err + return obj.(*v1.PodTemplate), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 086f4dbf9f40..1e469c9b1ab1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -24,13 +24,12 @@ import ( "fmt" autoscalingv1 "k8s.io/api/autoscaling/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -40,25 +39,25 @@ type FakeReplicationControllers struct { ns string } -var replicationcontrollersResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} +var replicationcontrollersResource = v1.SchemeGroupVersion.WithResource("replicationcontrollers") -var replicationcontrollersKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"} +var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationController") // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{}) + Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *FakeReplicationControllers) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ReplicationControllerList, err error) { +func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &corev1.ReplicationControllerList{}) + Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &v1.ReplicationControllerList{}) if obj == nil { return nil, err @@ -68,8 +67,8 @@ func (c *FakeReplicationControllers) List(ctx context.Context, opts v1.ListOptio if label == nil { label = labels.Everything() } - list := &corev1.ReplicationControllerList{ListMeta: obj.(*corev1.ReplicationControllerList).ListMeta} - for _, item := range obj.(*corev1.ReplicationControllerList).Items { + list := &v1.ReplicationControllerList{ListMeta: obj.(*v1.ReplicationControllerList).ListMeta} + for _, item := range obj.(*v1.ReplicationControllerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,75 +77,75 @@ func (c *FakeReplicationControllers) List(ctx context.Context, opts v1.ListOptio } // Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.CreateOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) + Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) + Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (*corev1.ReplicationController, error) { +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &corev1.ReplicationController{}) + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicationcontrollersResource, c.ns, name, opts), &corev1.ReplicationController{}) + Invokes(testing.NewDeleteActionWithOptions(replicationcontrollersResource, c.ns, name, opts), &v1.ReplicationController{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.ReplicationControllerList{}) + _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) return err } // Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &corev1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. 
-func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { if replicationController == nil { return nil, fmt.Errorf("replicationController provided to Apply must not be nil") } @@ -159,17 +158,17 @@ func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationContr return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &corev1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { if replicationController == nil { return nil, fmt.Errorf("replicationController provided to Apply must not be nil") } @@ -182,16 +181,16 @@ func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicatio return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &corev1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*corev1.ReplicationController), err + return obj.(*v1.ReplicationController), err } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) @@ -202,7 +201,7 @@ func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationCo } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 9e008a973279..87664985cea3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeResourceQuotas struct { ns string } -var resourcequotasResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} +var resourcequotasResource = v1.SchemeGroupVersion.WithResource("resourcequotas") -var resourcequotasKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"} +var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota") // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{}) + Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ResourceQuotaList, err error) { +func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &corev1.ResourceQuotaList{}) + Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &v1.ResourceQuotaList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &corev1.ResourceQuotaList{ListMeta: obj.(*corev1.ResourceQuotaList).ListMeta} - for _, item := range obj.(*corev1.ResourceQuotaList).Items { + list := &v1.ResourceQuotaList{ListMeta: obj.(*v1.ResourceQuotaList).ListMeta} + for _, item := range obj.(*v1.ResourceQuotaList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (res } // Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.CreateOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) + Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) + Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (*corev1.ResourceQuota, error) { +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &corev1.ResourceQuota{}) + Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourcequotasResource, c.ns, name, opts), &corev1.ResourceQuota{}) + Invokes(testing.NewDeleteActionWithOptions(resourcequotasResource, c.ns, name, opts), &v1.ResourceQuota{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.ResourceQuotaList{}) + _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) return err } // Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &corev1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { if resourceQuota == nil { return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *applyconf return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &corev1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } // ApplyStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { if resourceQuota == nil { return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *app return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &corev1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*corev1.ResourceQuota), err + return obj.(*v1.ResourceQuota), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index b46730533021..90035a703712 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeSecrets struct { ns string } -var secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} +var secretsResource = v1.SchemeGroupVersion.WithResource("secrets") -var secretsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} +var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *FakeSecrets) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(secretsResource, c.ns, name), &corev1.Secret{}) + Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) if obj == nil { return nil, err } - return obj.(*corev1.Secret), err + return obj.(*v1.Secret), err } // List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *FakeSecrets) List(ctx context.Context, opts v1.ListOptions) (result *corev1.SecretList, err error) { +func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &corev1.SecretList{}) + Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeSecrets) List(ctx context.Context, opts v1.ListOptions) (result *co if label == nil { label = labels.Everything() } - list := &corev1.SecretList{ListMeta: obj.(*corev1.SecretList).ListMeta} - for _, item := range obj.(*corev1.SecretList).Items { + list := &v1.SecretList{ListMeta: obj.(*v1.SecretList).ListMeta} + for _, item := range obj.(*v1.SecretList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeSecrets) List(ctx context.Context, opts v1.ListOptions) (result *co } // Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Create(ctx context.Context, secret *corev1.Secret, opts v1.CreateOptions) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &corev1.Secret{}) + Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) if obj == nil { return nil, err } - return obj.(*corev1.Secret), err + return obj.(*v1.Secret), err } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Update(ctx context.Context, secret *corev1.Secret, opts v1.UpdateOptions) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &corev1.Secret{}) + Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) if obj == nil { return nil, err } - return obj.(*corev1.Secret), err + return obj.(*v1.Secret), err } // Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *FakeSecrets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &corev1.Secret{}) + Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &v1.Secret{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.SecretList{}) + _, err := c.Fake.Invokes(action, &v1.SecretList{}) return err } // Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &corev1.Secret{}) + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{}) if obj == nil { return nil, err } - return obj.(*corev1.Secret), err + return obj.(*v1.Secret), err } // Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *FakeSecrets) Apply(ctx context.Context, secret *applyconfigurationscorev1.SecretApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { if secret == nil { return nil, fmt.Errorf("secret provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeSecrets) Apply(ctx context.Context, secret *applyconfigurationscore return nil, fmt.Errorf("secret.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.Secret{}) + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Secret{}) if obj == nil { return nil, err } - return obj.(*corev1.Secret), err + return obj.(*v1.Secret), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index bfbabec0743b..514ab19e395f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeServices struct { ns string } -var servicesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} +var servicesResource = v1.SchemeGroupVersion.WithResource("services") -var servicesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"} +var servicesKind = v1.SchemeGroupVersion.WithKind("Service") // Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *FakeServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Service, err error) { +func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(servicesResource, c.ns, name), &corev1.Service{}) + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *FakeServices) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ServiceList, err error) { +func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &corev1.ServiceList{}) + Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeServices) List(ctx context.Context, opts v1.ListOptions) (result *c if label == nil { label = labels.Everything() } - list := &corev1.ServiceList{ListMeta: obj.(*corev1.ServiceList).ListMeta} - for _, item := range obj.(*corev1.ServiceList).Items { + list := &v1.ServiceList{ListMeta: obj.(*v1.ServiceList).ListMeta} + for _, item := range obj.(*v1.ServiceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,67 +76,67 @@ func (c *FakeServices) List(ctx context.Context, opts v1.ListOptions) (result *c } // Watch returns a watch.Interface that watches the requested services. 
-func (c *FakeServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Create(ctx context.Context, service *corev1.Service, opts v1.CreateOptions) (result *corev1.Service, err error) { +func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &corev1.Service{}) + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Update(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (result *corev1.Service, err error) { +func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &corev1.Service{}) + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (*corev1.Service, error) { +func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &corev1.Service{}) + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *FakeServices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &corev1.Service{}) + Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &v1.Service{}) return err } // Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Service, err error) { +func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &corev1.Service{}) + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *FakeServices) Apply(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Service, err error) { +func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { if service == nil { return nil, fmt.Errorf("service provided to Apply must not be nil") } @@ -150,17 +149,17 @@ func (c *FakeServices) Apply(ctx context.Context, service *applyconfigurationsco return nil, fmt.Errorf("service.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &corev1.Service{}) + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeServices) ApplyStatus(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts v1.ApplyOptions) (result *corev1.Service, err error) { +func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { if service == nil { return nil, fmt.Errorf("service provided to Apply must not be nil") } @@ -173,10 +172,10 @@ func (c *FakeServices) ApplyStatus(ctx context.Context, service *applyconfigurat return nil, fmt.Errorf("service.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &corev1.Service{}) + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Service{}) if obj == nil { return nil, err } - return obj.(*corev1.Service), err + return obj.(*v1.Service), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index ff468957a72e..115ff07123e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -24,13 +24,12 @@ import ( "fmt" authenticationv1 "k8s.io/api/authentication/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" testing "k8s.io/client-go/testing" ) @@ -40,25 +39,25 @@ type FakeServiceAccounts struct { ns string } -var serviceaccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} +var serviceaccountsResource = v1.SchemeGroupVersion.WithResource("serviceaccounts") -var serviceaccountsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"} +var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount") // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{}) + Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*corev1.ServiceAccount), err + return obj.(*v1.ServiceAccount), err } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *FakeServiceAccounts) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ServiceAccountList, err error) { +func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &corev1.ServiceAccountList{}) + Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &v1.ServiceAccountList{}) if obj == nil { return nil, err @@ -68,8 +67,8 @@ func (c *FakeServiceAccounts) List(ctx context.Context, opts v1.ListOptions) (re if label == nil { label = labels.Everything() } - list := &corev1.ServiceAccountList{ListMeta: obj.(*corev1.ServiceAccountList).ListMeta} - for _, item := range obj.(*corev1.ServiceAccountList).Items { + list := &v1.ServiceAccountList{ListMeta: obj.(*v1.ServiceAccountList).ListMeta} + for _, item := range obj.(*v1.ServiceAccountList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,63 +77,63 @@ func (c *FakeServiceAccounts) List(ctx context.Context, opts v1.ListOptions) (re } // Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.CreateOptions) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) + Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*corev1.ServiceAccount), err + return obj.(*v1.ServiceAccount), err } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.UpdateOptions) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) + Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*corev1.ServiceAccount), err + return obj.(*v1.ServiceAccount), err } // Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(serviceaccountsResource, c.ns, name, opts), &corev1.ServiceAccount{}) + Invokes(testing.NewDeleteActionWithOptions(serviceaccountsResource, c.ns, name, opts), &v1.ServiceAccount{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &corev1.ServiceAccountList{}) + _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) return err } // Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &corev1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*corev1.ServiceAccount), err + return obj.(*v1.ServiceAccount), err } // Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. -func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *applyconfigurationscorev1.ServiceAccountApplyConfiguration, opts v1.ApplyOptions) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { if serviceAccount == nil { return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") } @@ -147,16 +146,16 @@ func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *applyco return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &corev1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*corev1.ServiceAccount), err + return obj.(*v1.ServiceAccount), err } // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. -func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts v1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { +func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go index 20e58b151f66..d159b5ea9e9a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - discoveryv1 "k8s.io/api/discovery/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeEndpointSlices struct { ns string } -var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.io", Version: "v1", Resource: "endpointslices"} +var endpointslicesResource = v1.SchemeGroupVersion.WithResource("endpointslices") -var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"} +var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *discoveryv1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{}) if obj == nil { return nil, err } - return obj.(*discoveryv1.EndpointSlice), err + return obj.(*v1.EndpointSlice), err } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *discoveryv1.EndpointSliceList, err error) { +func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &discoveryv1.EndpointSliceList{}) + Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &discoveryv1.EndpointSliceList{ListMeta: obj.(*discoveryv1.EndpointSliceList).ListMeta} - for _, item := range obj.(*discoveryv1.EndpointSliceList).Items { + list := &v1.EndpointSliceList{ListMeta: obj.(*v1.EndpointSliceList).ListMeta} + for _, item := range obj.(*v1.EndpointSliceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (res } // Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts v1.CreateOptions) (result *discoveryv1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) if obj == nil { return nil, err } - return obj.(*discoveryv1.EndpointSlice), err + return obj.(*v1.EndpointSlice), err } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts v1.UpdateOptions) (result *discoveryv1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) if obj == nil { return nil, err } - return obj.(*discoveryv1.EndpointSlice), err + return obj.(*v1.EndpointSlice), err } // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1.EndpointSlice{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &discoveryv1.EndpointSliceList{}) + _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) return err } // Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *discoveryv1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{}) if obj == nil { return nil, err } - return obj.(*discoveryv1.EndpointSlice), err + return obj.(*v1.EndpointSlice), err } // Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *discoveryv1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { if endpointSlice == nil { return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *applyconf return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EndpointSlice{}) if obj == nil { return nil, err } - return obj.(*discoveryv1.EndpointSlice), err + return obj.(*v1.EndpointSlice), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 249de3446cb9..268371811346 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" @@ -39,9 +38,9 @@ type FakeEndpointSlices struct { ns string } -var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"} +var endpointslicesResource = v1beta1.SchemeGroupVersion.WithResource("endpointslices") -var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSlice"} +var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go index 87ff13c822c7..0928781f1e30 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - eventsv1 "k8s.io/api/events/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeEvents struct { ns string } -var eventsResource = schema.GroupVersionResource{Group: "events.k8s.io", Version: "v1", Resource: "events"} +var eventsResource = v1.SchemeGroupVersion.WithResource("events") -var eventsKind = schema.GroupVersionKind{Group: "events.k8s.io", Version: "v1", Kind: "Event"} +var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
-func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *eventsv1.Event, err error) { +func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &eventsv1.Event{}) + Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*eventsv1.Event), err + return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *eventsv1.EventList, err error) { +func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &eventsv1.EventList{}) + Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *eve if label == nil { label = labels.Everything() } - list := &eventsv1.EventList{ListMeta: obj.(*eventsv1.EventList).ListMeta} - for _, item := range obj.(*eventsv1.EventList).Items { + list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} + for _, item := range obj.(*v1.EventList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *eve } // Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *eventsv1.Event, opts v1.CreateOptions) (result *eventsv1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &eventsv1.Event{}) + Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*eventsv1.Event), err + return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *eventsv1.Event, opts v1.UpdateOptions) (result *eventsv1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &eventsv1.Event{}) + Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*eventsv1.Event), err + return obj.(*v1.Event), err } // Delete takes name of the event and deletes it. Returns an error if one occurs. 
-func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &eventsv1.Event{}) + Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &eventsv1.EventList{}) + _, err := c.Fake.Invokes(action, &v1.EventList{}) return err } // Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *eventsv1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &eventsv1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*eventsv1.Event), err + return obj.(*v1.Event), err } // Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *applyconfigurationseventsv1.EventApplyConfiguration, opts v1.ApplyOptions) (result *eventsv1.Event, err error) { +func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { if event == nil { return nil, fmt.Errorf("event provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeEvents) Apply(ctx context.Context, event *applyconfigurationsevents return nil, fmt.Errorf("event.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &eventsv1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) if obj == nil { return nil, err } - return obj.(*eventsv1.Event), err + return obj.(*v1.Event), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 464fff911671..562f8d5e45e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,8 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// -// have been created with the "" namespace. 
+// have been created with the "" namespace. func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index 5481c4d3f883..522b4dc06322 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" @@ -39,9 +38,9 @@ type FakeEvents struct { ns string } -var eventsResource = schema.GroupVersionResource{Group: "events.k8s.io", Version: "v1beta1", Resource: "events"} +var eventsResource = v1beta1.SchemeGroupVersion.WithResource("events") -var eventsKind = schema.GroupVersionKind{Group: "events.k8s.io", Version: "v1beta1", Kind: "Event"} +var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514df6f4..4725d2cd16f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 11c05f195159..abe3d2da1f9b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 
"k8s.io/client-go/applyconfigurations/extensions/v1beta1" @@ -39,9 +38,9 @@ type FakeDaemonSets struct { ns string } -var daemonsetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"} +var daemonsetsResource = v1beta1.SchemeGroupVersion.WithResource("daemonsets") -var daemonsetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "DaemonSet"} +var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index 9edf85d60eca..e399361a92e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" @@ -39,9 +38,9 @@ type FakeDeployments struct { ns string } -var deploymentsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"} +var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") -var deploymentsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"} +var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index 36c0d51bc3ff..a54c182eaeda 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -44,10 +44,6 @@ func (c *FakeExtensionsV1beta1) NetworkPolicies(namespace string) v1beta1.Networ return &FakeNetworkPolicies{c, namespace} } -func (c *FakeExtensionsV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { - return &FakePodSecurityPolicies{c} -} - func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { return &FakeReplicaSets{c, namespace} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index d3bbd1abce3e..48ae51e80da8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" @@ -39,9 +38,9 @@ type FakeIngresses struct { ns string } -var ingressesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"} +var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") -var ingressesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"} +var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. 
func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index 1ff5d8c99268..7bf8c6972aa8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" @@ -39,9 +38,9 @@ type FakeNetworkPolicies struct { ns string } -var networkpoliciesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "networkpolicies"} +var networkpoliciesResource = v1beta1.SchemeGroupVersion.WithResource("networkpolicies") -var networkpoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "NetworkPolicy"} +var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go deleted file mode 100644 index dd53fd5bd75f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePodSecurityPolicies implements PodSecurityPolicyInterface -type FakePodSecurityPolicies struct { - Fake *FakeExtensionsV1beta1 -} - -var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"} - -var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "PodSecurityPolicy"} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *FakePodSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(podsecuritypoliciesResource, name, opts), &v1beta1.PodSecurityPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *FakePodSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index d6d35e4938cb..42da6fa8b642 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" @@ -39,9 +38,9 @@ type FakeReplicaSets struct { ns string } -var replicasetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"} +var replicasetsResource = v1beta1.SchemeGroupVersion.WithResource("replicasets") -var replicasetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"} +var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f0417c9..67fcf4992b06 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c3133d6a..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
-func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. 
-func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go index 06730a1ea0c8..f3676388923b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" @@ -38,9 +37,9 @@ type FakeFlowSchemas struct { Fake *FakeFlowcontrolV1alpha1 } -var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "flowschemas"} +var flowschemasResource = v1alpha1.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "FlowSchema"} +var flowschemasKind = v1alpha1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. 
func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go index 0e7228254196..6512c36f69b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" @@ -38,9 +37,9 @@ type FakePriorityLevelConfigurations struct { Fake *FakeFlowcontrolV1alpha1 } -var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "prioritylevelconfigurations"} +var prioritylevelconfigurationsResource = v1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "PriorityLevelConfiguration"} +var prioritylevelconfigurationsKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go index 58d5ed14efd8..be7a7e390fef 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" @@ -38,9 +37,9 @@ type FakeFlowSchemas struct { Fake *FakeFlowcontrolV1beta1 } -var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "flowschemas"} +var flowschemasResource = v1beta1.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} +var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. 
func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go index 19fc62beb394..698a168b3702 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" @@ -38,9 +37,9 @@ type FakePriorityLevelConfigurations struct { Fake *FakeFlowcontrolV1beta1 } -var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "prioritylevelconfigurations"} +var prioritylevelconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} +var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go index e4603eba7119..7ce6d2116be6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" @@ -38,9 +37,9 @@ type FakeFlowSchemas struct { Fake *FakeFlowcontrolV1beta2 } -var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "flowschemas"} +var flowschemasResource = v1beta2.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} +var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. 
func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go index e1979d0751e6..7340f8a09e4f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go @@ -26,7 +26,6 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" @@ -38,9 +37,9 @@ type FakePriorityLevelConfigurations struct { Fake *FakeFlowcontrolV1beta2 } -var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "prioritylevelconfigurations"} +var prioritylevelconfigurationsResource = v1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} +var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go index d6a8bdc32f3e..1371f6ed6744 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go @@ -26,7 +26,6 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" @@ -38,9 +37,9 @@ type FakeFlowSchemas struct { Fake *FakeFlowcontrolV1beta3 } -var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Resource: "flowschemas"} +var flowschemasResource = v1beta3.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} +var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. 
func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go index 5fbc57da7fe3..a0e266fecb5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go @@ -26,7 +26,6 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" @@ -38,9 +37,9 @@ type FakePriorityLevelConfigurations struct { Fake *FakeFlowcontrolV1beta3 } -var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Resource: "prioritylevelconfigurations"} +var prioritylevelconfigurationsResource = v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} +var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go index 8ffcd7f97cf4..002de0dd8a07 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - networkingv1 "k8s.io/api/networking/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeIngresses struct { ns string } -var ingressesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"} +var ingressesResource = v1.SchemeGroupVersion.WithResource("ingresses") -var ingressesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"} +var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. 
-func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &networkingv1.Ingress{}) + Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *networkingv1.IngressList, err error) { +func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &networkingv1.IngressList{}) + Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * if label == nil { label = labels.Everything() } - list := &networkingv1.IngressList{ListMeta: obj.(*networkingv1.IngressList).ListMeta} - for _, item := range obj.(*networkingv1.IngressList).Items { + list := &v1.IngressList{ListMeta: obj.(*v1.IngressList).ListMeta} + for _, item := range obj.(*v1.IngressList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * } // Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *networkingv1.Ingress, opts v1.CreateOptions) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &networkingv1.Ingress{}) + Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *networkingv1.Ingress, opts v1.UpdateOptions) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &networkingv1.Ingress{}) + Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *networkingv1.Ingress, opts v1.UpdateOptions) (*networkingv1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &networkingv1.Ingress{}) + Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &networkingv1.Ingress{}) + Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1.Ingress{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &networkingv1.IngressList{}) + _, err := c.Fake.Invokes(action, &v1.IngressList{}) return err } // Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &networkingv1.Ingress{}) + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // Apply takes the given apply declarative configuration, applies it and returns the applied ingress. 
-func (c *FakeIngresses) Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { if ingress == nil { return nil, fmt.Errorf("ingress provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *applyconfigurationsn return nil, fmt.Errorf("ingress.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &networkingv1.Ingress{}) + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1.Ingress, err error) { +func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { if ingress == nil { return nil, fmt.Errorf("ingress provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *applyconfigura return nil, fmt.Errorf("ingress.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &networkingv1.Ingress{}) + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Ingress{}) if obj == nil { return nil, err } - return obj.(*networkingv1.Ingress), err + return obj.(*v1.Ingress), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go index fe320c4b72f1..208a975082f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - networkingv1 "k8s.io/api/networking/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeIngressClasses struct { Fake *FakeNetworkingV1 } -var ingressclassesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingressclasses"} +var ingressclassesResource = v1.SchemeGroupVersion.WithResource("ingressclasses") -var ingressclassesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "IngressClass"} +var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *networkingv1.IngressClass, err error) { +func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ingressclassesResource, name), &networkingv1.IngressClass{}) + Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{}) if obj == nil { return nil, err } - return obj.(*networkingv1.IngressClass), err + return obj.(*v1.IngressClass), err } // List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *networkingv1.IngressClassList, err error) { +func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &networkingv1.IngressClassList{}) + Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &networkingv1.IngressClassList{ListMeta: obj.(*networkingv1.IngressClassList).ListMeta} - for _, item := range obj.(*networkingv1.IngressClassList).Items { + list := &v1.IngressClassList{ListMeta: obj.(*v1.IngressClassList).ListMeta} + for _, item := range obj.(*v1.IngressClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (res } // Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) } // Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *networkingv1.IngressClass, opts v1.CreateOptions) (result *networkingv1.IngressClass, err error) { +func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &networkingv1.IngressClass{}) + Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) if obj == nil { return nil, err } - return obj.(*networkingv1.IngressClass), err + return obj.(*v1.IngressClass), err } // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *networkingv1.IngressClass, opts v1.UpdateOptions) (result *networkingv1.IngressClass, err error) { +func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &networkingv1.IngressClass{}) + Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) if obj == nil { return nil, err } - return obj.(*networkingv1.IngressClass), err + return obj.(*v1.IngressClass), err } // Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &networkingv1.IngressClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1.IngressClass{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) - _, err := c.Fake.Invokes(action, &networkingv1.IngressClassList{}) + _, err := c.Fake.Invokes(action, &v1.IngressClassList{}) return err } // Patch applies the patch and returns the patched ingressClass. -func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1.IngressClass, err error) { +func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &networkingv1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{}) if obj == nil { return nil, err } - return obj.(*networkingv1.IngressClass), err + return obj.(*v1.IngressClass), err } // Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1.IngressClass, err error) { +func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { if ingressClass == nil { return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *applyconfi return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &networkingv1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1.IngressClass{}) if obj == nil { return nil, err } - return obj.(*networkingv1.IngressClass), err + return obj.(*v1.IngressClass), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 16c10cac0d54..be7413cb8fe6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - networkingv1 "k8s.io/api/networking/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeNetworkPolicies struct { ns string } -var networkpoliciesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"} +var networkpoliciesResource = v1.SchemeGroupVersion.WithResource("networkpolicies") -var networkpoliciesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"} +var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *networkingv1.NetworkPolicyList, err error) { +func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &networkingv1.NetworkPolicyList{}) + Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (re if label == nil { label = labels.Everything() } - list := &networkingv1.NetworkPolicyList{ListMeta: obj.(*networkingv1.NetworkPolicyList).ListMeta} - for _, item := range obj.(*networkingv1.NetworkPolicyList).Items { + list := &v1.NetworkPolicyList{ListMeta: obj.(*v1.NetworkPolicyList).ListMeta} + for _, item := range obj.(*v1.NetworkPolicyList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (re } // Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.CreateOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.UpdateOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNetworkPolicies) UpdateStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.UpdateOptions) (*networkingv1.NetworkPolicy, error) { +func (c *FakeNetworkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(networkpoliciesResource, "status", c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewUpdateSubresourceAction(networkpoliciesResource, "status", c.ns, networkPolicy), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1.NetworkPolicy{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &networkingv1.NetworkPolicyList{}) + _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) return err } // Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { if networkPolicy == nil { return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *applycon return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNetworkPolicies) ApplyStatus(ctx context.Context, networkPolicy *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { if networkPolicy == nil { return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakeNetworkPolicies) ApplyStatus(ctx context.Context, networkPolicy *ap return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.NetworkPolicy{}) if obj == nil { return nil, err } - return obj.(*networkingv1.NetworkPolicy), err + return obj.(*v1.NetworkPolicy), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go index ca0352d390ce..592e9fc63dc4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" @@ -38,9 +37,9 @@ type FakeClusterCIDRs struct { Fake *FakeNetworkingV1alpha1 } -var clustercidrsResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1alpha1", Resource: "clustercidrs"} +var clustercidrsResource = v1alpha1.SchemeGroupVersion.WithResource("clustercidrs") -var clustercidrsKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1alpha1", Kind: "ClusterCIDR"} +var clustercidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR") // Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. 
func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 349196c53433..7a3b861be046 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" @@ -39,9 +38,9 @@ type FakeIngresses struct { ns string } -var ingressesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"} +var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") -var ingressesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1beta1", Kind: "Ingress"} +var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go index 6ef8d1eddd51..1804e61fc37b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" @@ -38,9 +37,9 @@ type FakeIngressClasses struct { Fake *FakeNetworkingV1beta1 } -var ingressclassesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingressclasses"} +var ingressclassesResource = v1beta1.SchemeGroupVersion.WithResource("ingressclasses") -var ingressclassesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1beta1", Kind: "IngressClass"} +var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. 
func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go index 3a1aaf1b6039..35cfbcae4bfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - nodev1 "k8s.io/api/node/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" + nodev1 "k8s.io/client-go/applyconfigurations/node/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeRuntimeClasses struct { Fake *FakeNodeV1 } -var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1", Resource: "runtimeclasses"} +var runtimeclassesResource = v1.SchemeGroupVersion.WithResource("runtimeclasses") -var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1", Kind: "RuntimeClass"} +var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *nodev1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1.RuntimeClass{}) if obj == nil { return nil, err } - return obj.(*nodev1.RuntimeClass), err + return obj.(*v1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *nodev1.RuntimeClassList, err error) { +func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &nodev1.RuntimeClassList{}) + Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1.RuntimeClassList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &nodev1.RuntimeClassList{ListMeta: obj.(*nodev1.RuntimeClassList).ListMeta} - for _, item := range obj.(*nodev1.RuntimeClassList).Items { + list := &v1.RuntimeClassList{ListMeta: obj.(*v1.RuntimeClassList).ListMeta} + for _, item := range obj.(*v1.RuntimeClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res } // Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.CreateOptions) (result *nodev1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) if obj == nil { return nil, err } - return obj.(*nodev1.RuntimeClass), err + return obj.(*v1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.UpdateOptions) (result *nodev1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) if obj == nil { return nil, err } - return obj.(*nodev1.RuntimeClass), err + return obj.(*v1.RuntimeClass), err } // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1.RuntimeClass{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) - _, err := c.Fake.Invokes(action, &nodev1.RuntimeClassList{}) + _, err := c.Fake.Invokes(action, &v1.RuntimeClassList{}) return err } // Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1.RuntimeClass{}) if obj == nil { return nil, err } - return obj.(*nodev1.RuntimeClass), err + return obj.(*v1.RuntimeClass), err } // Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { if runtimeClass == nil { return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *applyconfi return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1.RuntimeClass{}) if obj == nil { return nil, err } - return obj.(*nodev1.RuntimeClass), err + return obj.(*v1.RuntimeClass), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 4b6387943dd9..2ff7d3f973e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" @@ -38,9 +37,9 @@ type FakeRuntimeClasses struct { Fake *FakeNodeV1alpha1 } -var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1alpha1", Resource: "runtimeclasses"} +var runtimeclassesResource = v1alpha1.SchemeGroupVersion.WithResource("runtimeclasses") -var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1alpha1", Kind: "RuntimeClass"} +var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index 191162114ba6..e6552f9acaf5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" @@ -38,9 +37,9 @@ type FakeRuntimeClasses struct { Fake *FakeNodeV1beta1 } -var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1beta1", Resource: "runtimeclasses"} +var runtimeclassesResource = v1beta1.SchemeGroupVersion.WithResource("runtimeclasses") -var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1beta1", Kind: "RuntimeClass"} +var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. 
func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go index 8023e2cd6f4f..7b5f51caf4ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - policyv1 "k8s.io/api/policy/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakePodDisruptionBudgets struct { ns string } -var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"} +var poddisruptionbudgetsResource = v1.SchemeGroupVersion.WithResource("poddisruptionbudgets") -var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1", Kind: "PodDisruptionBudget"} +var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudget") // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *policyv1.PodDisruptionBudgetList, err error) { +func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &policyv1.PodDisruptionBudgetList{}) + Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1.PodDisruptionBudgetList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions if label == nil { label = labels.Everything() } - list := &policyv1.PodDisruptionBudgetList{ListMeta: obj.(*policyv1.PodDisruptionBudgetList).ListMeta} - for _, item := range obj.(*policyv1.PodDisruptionBudgetList).Items { + list := &v1.PodDisruptionBudgetList{ListMeta: obj.(*v1.PodDisruptionBudgetList).ListMeta} + for _, item := range obj.(*v1.PodDisruptionBudgetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,75 +76,75 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions } // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts v1.CreateOptions) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts v1.UpdateOptions) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1.PodDisruptionBudget{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &policyv1.PodDisruptionBudgetList{}) + _, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{}) return err } // Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. 
-func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { if podDisruptionBudget == nil { return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") } @@ -158,17 +157,17 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { if podDisruptionBudget == nil { return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") } @@ -181,10 +180,10 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PodDisruptionBudget{}) if obj == nil { return nil, err } - return obj.(*policyv1.PodDisruptionBudget), err + return obj.(*v1.PodDisruptionBudget), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index d1c856754cec..bcee8e77744a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" @@ -39,9 +38,9 @@ type FakePodDisruptionBudgets struct { ns string } -var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"} +var poddisruptionbudgetsResource = v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets") -var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"} +var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget") // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. 
func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index 614d4e799d6f..ade1aab7f02f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" @@ -38,9 +37,9 @@ type FakePodSecurityPolicies struct { Fake *FakePolicyV1beta1 } -var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"} +var podsecuritypoliciesResource = v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies") -var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} +var podsecuritypoliciesKind = v1beta1.SchemeGroupVersion.WithKind("PodSecurityPolicy") // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 21a32ca04374..5add33ddfb2d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeClusterRoles struct { Fake *FakeRbacV1 } -var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"} +var clusterrolesResource = v1.SchemeGroupVersion.WithResource("clusterroles") -var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"} +var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. 
-func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1.ClusterRole{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRole), err + return obj.(*v1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.ClusterRoleList, err error) { +func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &rbacv1.ClusterRoleList{}) + Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1.ClusterRoleList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul if label == nil { label = labels.Everything() } - list := &rbacv1.ClusterRoleList{ListMeta: obj.(*rbacv1.ClusterRoleList).ListMeta} - for _, item := range obj.(*rbacv1.ClusterRoleList).Items { + list := &v1.ClusterRoleList{ListMeta: obj.(*v1.ClusterRoleList).ListMeta} + for _, item := range obj.(*v1.ClusterRoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul } // Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.CreateOptions) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRole), err + return obj.(*v1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.UpdateOptions) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRole), err + return obj.(*v1.ClusterRole), err } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1.ClusterRole{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) - _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleList{}) + _, err := c.Fake.Invokes(action, &v1.ClusterRoleList{}) return err } // Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1.ClusterRole{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRole), err + return obj.(*v1.ClusterRole), err } // Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { if clusterRole == nil { return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *applyconfigur return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1.ClusterRole{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRole), err + return obj.(*v1.ClusterRole), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index c65399598b81..d42e93e65326 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeClusterRoleBindings struct { Fake *FakeRbacV1 } -var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"} +var clusterrolebindingsResource = v1.SchemeGroupVersion.WithResource("clusterrolebindings") -var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"} +var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding") // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1.ClusterRoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRoleBinding), err + return obj.(*v1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.ClusterRoleBindingList, err error) { +func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &rbacv1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1.ClusterRoleBindingList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) if label == nil { label = labels.Everything() } - list := &rbacv1.ClusterRoleBindingList{ListMeta: obj.(*rbacv1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*rbacv1.ClusterRoleBindingList).Items { + list := &v1.ClusterRoleBindingList{ListMeta: obj.(*v1.ClusterRoleBindingList).ListMeta} + for _, item := range obj.(*v1.ClusterRoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.CreateOptions) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRoleBinding), err + return obj.(*v1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.UpdateOptions) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRoleBinding), err + return obj.(*v1.ClusterRoleBinding), err } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1.ClusterRoleBinding{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) - _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleBindingList{}) + _, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{}) return err } // Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1.ClusterRoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRoleBinding), err + return obj.(*v1.ClusterRoleBinding), err } // Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { if clusterRoleBinding == nil { return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1.ClusterRoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.ClusterRoleBinding), err + return obj.(*v1.ClusterRoleBinding), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index 4a70d0c353c8..a3bc5da6636d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeRoles struct { ns string } -var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"} +var rolesResource = v1.SchemeGroupVersion.WithResource("roles") -var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"} +var rolesKind = v1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &rbacv1.Role{}) + Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1.Role{}) if obj == nil { return nil, err } - return obj.(*rbacv1.Role), err + return obj.(*v1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.RoleList, err error) { +func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &rbacv1.RoleList{}) + Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1.RoleList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbac if label == nil { label = labels.Everything() } - list := &rbacv1.RoleList{ListMeta: obj.(*rbacv1.RoleList).ListMeta} - for _, item := range obj.(*rbacv1.RoleList).Items { + list := &v1.RoleList{ListMeta: obj.(*v1.RoleList).ListMeta} + for _, item := range obj.(*v1.RoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbac } // Watch returns a watch.Interface that watches the requested roles. 
-func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *rbacv1.Role, opts v1.CreateOptions) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &rbacv1.Role{}) + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1.Role{}) if obj == nil { return nil, err } - return obj.(*rbacv1.Role), err + return obj.(*v1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *rbacv1.Role, opts v1.UpdateOptions) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &rbacv1.Role{}) + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1.Role{}) if obj == nil { return nil, err } - return obj.(*rbacv1.Role), err + return obj.(*v1.Role), err } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &rbacv1.Role{}) + Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1.Role{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &rbacv1.RoleList{}) + _, err := c.Fake.Invokes(action, &v1.RoleList{}) return err } // Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &rbacv1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1.Role{}) if obj == nil { return nil, err } - return obj.(*rbacv1.Role), err + return obj.(*v1.Role), err } // Apply takes the given apply declarative configuration, applies it and returns the applied role. 
-func (c *FakeRoles) Apply(ctx context.Context, role *applyconfigurationsrbacv1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { if role == nil { return nil, fmt.Errorf("role provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeRoles) Apply(ctx context.Context, role *applyconfigurationsrbacv1.R return nil, fmt.Errorf("role.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &rbacv1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Role{}) if obj == nil { return nil, err } - return obj.(*rbacv1.Role), err + return obj.(*v1.Role), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index 0db37dd57b5f..eeb37e9db30c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeRoleBindings struct { ns string } -var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} +var rolebindingsResource = v1.SchemeGroupVersion.WithResource("rolebindings") -var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"} +var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{}) + Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1.RoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.RoleBinding), err + return obj.(*v1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.RoleBindingList, err error) { +func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &rbacv1.RoleBindingList{}) + Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1.RoleBindingList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul if label == nil { label = labels.Everything() } - list := &rbacv1.RoleBindingList{ListMeta: obj.(*rbacv1.RoleBindingList).ListMeta} - for _, item := range obj.(*rbacv1.RoleBindingList).Items { + list := &v1.RoleBindingList{ListMeta: obj.(*v1.RoleBindingList).ListMeta} + for _, item := range obj.(*v1.RoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.CreateOptions) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{}) + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.RoleBinding), err + return obj.(*v1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.UpdateOptions) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{}) + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.RoleBinding), err + return obj.(*v1.RoleBinding), err } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &rbacv1.RoleBinding{}) + Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1.RoleBinding{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &rbacv1.RoleBindingList{}) + _, err := c.Fake.Invokes(action, &v1.RoleBindingList{}) return err } // Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &rbacv1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1.RoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.RoleBinding), err + return obj.(*v1.RoleBinding), err } // Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { if roleBinding == nil { return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *applyconfigur return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &rbacv1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1.RoleBinding{}) if obj == nil { return nil, err } - return obj.(*rbacv1.RoleBinding), err + return obj.(*v1.RoleBinding), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 7e8e3a9b8aa4..534a1990f507 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" @@ -38,9 +37,9 @@ type FakeClusterRoles struct { Fake *FakeRbacV1alpha1 } -var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterroles"} +var clusterrolesResource = v1alpha1.SchemeGroupVersion.WithResource("clusterroles") -var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRole"} +var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 5c5e632789c1..0a4359392ddf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" @@ -38,9 +37,9 @@ type FakeClusterRoleBindings struct { Fake *FakeRbacV1alpha1 } -var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterrolebindings"} +var clusterrolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings") -var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"} +var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding") // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. 
func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index cec3f099dc5a..a0e28348ae48 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" @@ -39,9 +38,9 @@ type FakeRoles struct { ns string } -var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "roles"} +var rolesResource = v1alpha1.SchemeGroupVersion.WithResource("roles") -var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "Role"} +var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 2d66494fdc87..76649f5c2b34 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" @@ -39,9 +38,9 @@ type FakeRoleBindings struct { ns string } -var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "rolebindings"} +var rolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("rolebindings") -var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBinding"} +var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 2b4f60054e08..2a94a4315eb2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" @@ -38,9 +37,9 @@ type FakeClusterRoles struct { Fake *FakeRbacV1beta1 } -var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"} +var clusterrolesResource = v1beta1.SchemeGroupVersion.WithResource("clusterroles") -var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRole"} +var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 379261ec86fa..c9fd7c0cdd00 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" @@ -38,9 +37,9 @@ type FakeClusterRoleBindings struct { Fake *FakeRbacV1beta1 } -var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"} +var clusterrolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings") -var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBinding"} +var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding") // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. 
func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 44df9128a0e8..4158cf1d55ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" @@ -39,9 +38,9 @@ type FakeRoles struct { ns string } -var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"} +var rolesResource = v1beta1.SchemeGroupVersion.WithResource("roles") -var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"} +var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 5e90e6dbd15c..4616f0fd10e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" @@ -39,9 +38,9 @@ type FakeRoleBindings struct { ns string } -var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"} +var rolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("rolebindings") -var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBinding"} +var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go index 7b4e2a5d5a4d..3e43fee49a7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/resource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" @@ -39,9 +38,9 @@ type FakePodSchedulings struct { ns string } -var podschedulingsResource = schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha1", Resource: "podschedulings"} +var podschedulingsResource = v1alpha1.SchemeGroupVersion.WithResource("podschedulings") -var podschedulingsKind = schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "PodScheduling"} +var podschedulingsKind = v1alpha1.SchemeGroupVersion.WithKind("PodScheduling") // Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. func (c *FakePodSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go index a458459f84fa..a5fe28dc5a30 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/resource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" @@ -39,9 +38,9 @@ type FakeResourceClaims struct { ns string } -var resourceclaimsResource = schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha1", Resource: "resourceclaims"} +var resourceclaimsResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclaims") -var resourceclaimsKind = schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaim"} +var resourceclaimsKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClaim") // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. 
func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go index be22ab6d3709..47b78e22a3f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/resource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" @@ -39,9 +38,9 @@ type FakeResourceClaimTemplates struct { ns string } -var resourceclaimtemplatesResource = schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha1", Resource: "resourceclaimtemplates"} +var resourceclaimtemplatesResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclaimtemplates") -var resourceclaimtemplatesKind = schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplate"} +var resourceclaimtemplatesKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClaimTemplate") // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go index 80fb7ec034b3..47ccbd6310b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/resource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" @@ -38,9 +37,9 @@ type FakeResourceClasses struct { Fake *FakeResourceV1alpha1 } -var resourceclassesResource = schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha1", Resource: "resourceclasses"} +var resourceclassesResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclasses") -var resourceclassesKind = schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClass"} +var resourceclassesKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClass") // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. 
func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index 30f82270e7bb..40ab9fb40755 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - schedulingv1 "k8s.io/api/scheduling/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakePriorityClasses struct { Fake *FakeSchedulingV1 } -var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1", Resource: "priorityclasses"} +var priorityclassesResource = v1.SchemeGroupVersion.WithResource("priorityclasses") -var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1", Kind: "PriorityClass"} +var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1.PriorityClass{}) if obj == nil { return nil, err } - return obj.(*schedulingv1.PriorityClass), err + return obj.(*v1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *schedulingv1.PriorityClassList, err error) { +func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &schedulingv1.PriorityClassList{}) + Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1.PriorityClassList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re if label == nil { label = labels.Everything() } - list := &schedulingv1.PriorityClassList{ListMeta: obj.(*schedulingv1.PriorityClassList).ListMeta} - for _, item := range obj.(*schedulingv1.PriorityClassList).Items { + list := &v1.PriorityClassList{ListMeta: obj.(*v1.PriorityClassList).ListMeta} + for _, item := range obj.(*v1.PriorityClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.CreateOptions) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) if obj == nil { return nil, err } - return obj.(*schedulingv1.PriorityClass), err + return obj.(*v1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.UpdateOptions) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) if obj == nil { return nil, err } - return obj.(*schedulingv1.PriorityClass), err + return obj.(*v1.PriorityClass), err } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1.PriorityClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) - _, err := c.Fake.Invokes(action, &schedulingv1.PriorityClassList{}) + _, err := c.Fake.Invokes(action, &v1.PriorityClassList{}) return err } // Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1.PriorityClass{}) if obj == nil { return nil, err } - return obj.(*schedulingv1.PriorityClass), err + return obj.(*v1.PriorityClass), err } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { if priorityClass == nil { return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *applycon return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1.PriorityClass{}) if obj == nil { return nil, err } - return obj.(*schedulingv1.PriorityClass), err + return obj.(*v1.PriorityClass), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 32b6a11cb6ce..3c8404a725a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" @@ -38,9 +37,9 @@ type FakePriorityClasses struct { Fake *FakeSchedulingV1alpha1 } -var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1alpha1", Resource: "priorityclasses"} +var priorityclassesResource = v1alpha1.SchemeGroupVersion.WithResource("priorityclasses") -var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClass"} +var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index 8ff5a1bd6136..4cf2e26c7752 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" @@ -38,9 +37,9 @@ type FakePriorityClasses struct { Fake *FakeSchedulingV1beta1 } -var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"} +var priorityclassesResource = v1beta1.SchemeGroupVersion.WithResource("priorityclasses") -var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClass"} +var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. 
func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go index 42df475dcef3..49832273762e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeCSIDrivers struct { Fake *FakeStorageV1 } -var csidriversResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "csidrivers"} +var csidriversResource = v1.SchemeGroupVersion.WithResource("csidrivers") -var csidriversKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSIDriver"} +var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &storagev1.CSIDriver{}) + Invokes(testing.NewRootGetAction(csidriversResource, name), &v1.CSIDriver{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIDriver), err + return obj.(*v1.CSIDriver), err } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.CSIDriverList, err error) { +func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &storagev1.CSIDriverList{}) + Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1.CSIDriverList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &storagev1.CSIDriverList{ListMeta: obj.(*storagev1.CSIDriverList).ListMeta} - for _, item := range obj.(*storagev1.CSIDriverList).Items { + list := &v1.CSIDriverList{ListMeta: obj.(*v1.CSIDriverList).ListMeta} + for _, item := range obj.(*v1.CSIDriverList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested cSIDrivers. 
-func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts v1.CreateOptions) (result *storagev1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &storagev1.CSIDriver{}) + Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIDriver), err + return obj.(*v1.CSIDriver), err } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts v1.UpdateOptions) (result *storagev1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &storagev1.CSIDriver{}) + Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIDriver), err + return obj.(*v1.CSIDriver), err } // Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &storagev1.CSIDriver{}) + Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1.CSIDriver{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) - _, err := c.Fake.Invokes(action, &storagev1.CSIDriverList{}) + _, err := c.Fake.Invokes(action, &v1.CSIDriverList{}) return err } // Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &storagev1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1.CSIDriver{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIDriver), err + return obj.(*v1.CSIDriver), err } // Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { if cSIDriver == nil { return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *applyconfiguratio return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &storagev1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1.CSIDriver{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIDriver), err + return obj.(*v1.CSIDriver), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index 4041d5e63fce..0271a20f3d47 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeCSINodes struct { Fake *FakeStorageV1 } -var csinodesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "csinodes"} +var csinodesResource = v1.SchemeGroupVersion.WithResource("csinodes") -var csinodesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSINode"} +var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(csinodesResource, name), &storagev1.CSINode{}) + Invokes(testing.NewRootGetAction(csinodesResource, name), &v1.CSINode{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSINode), err + return obj.(*v1.CSINode), err } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.CSINodeList, err error) { +func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &storagev1.CSINodeList{}) + Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1.CSINodeList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *s if label == nil { label = labels.Everything() } - list := &storagev1.CSINodeList{ListMeta: obj.(*storagev1.CSINodeList).ListMeta} - for _, item := range obj.(*storagev1.CSINodeList).Items { + list := &v1.CSINodeList{ListMeta: obj.(*v1.CSINodeList).ListMeta} + for _, item := range obj.(*v1.CSINodeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *s } // Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *storagev1.CSINode, opts v1.CreateOptions) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &storagev1.CSINode{}) + Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1.CSINode{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSINode), err + return obj.(*v1.CSINode), err } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *storagev1.CSINode, opts v1.UpdateOptions) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &storagev1.CSINode{}) + Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1.CSINode{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSINode), err + return obj.(*v1.CSINode), err } // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &storagev1.CSINode{}) + Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1.CSINode{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) - _, err := c.Fake.Invokes(action, &storagev1.CSINodeList{}) + _, err := c.Fake.Invokes(action, &v1.CSINodeList{}) return err } // Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &storagev1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1.CSINode{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSINode), err + return obj.(*v1.CSINode), err } // Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { if cSINode == nil { return nil, fmt.Errorf("cSINode provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *applyconfigurationsst return nil, fmt.Errorf("cSINode.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &storagev1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1.CSINode{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSINode), err + return obj.(*v1.CSINode), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go index 40c4171c8035..b12bbe3c15c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" testing "k8s.io/client-go/testing" ) @@ -39,25 +38,25 @@ type FakeCSIStorageCapacities struct { ns string } -var csistoragecapacitiesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "csistoragecapacities"} +var csistoragecapacitiesResource = v1.SchemeGroupVersion.WithResource("csistoragecapacities") -var csistoragecapacitiesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSIStorageCapacity"} +var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacity") // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.CSIStorageCapacity, err error) { +func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1.CSIStorageCapacity{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIStorageCapacity), err + return obj.(*v1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.CSIStorageCapacityList, err error) { +func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &storagev1.CSIStorageCapacityList{}) + Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1.CSIStorageCapacityList{}) if obj == nil { return nil, err @@ -67,8 +66,8 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions if label == nil { label = labels.Everything() } - list := &storagev1.CSIStorageCapacityList{ListMeta: obj.(*storagev1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*storagev1.CSIStorageCapacityList).Items { + list := &v1.CSIStorageCapacityList{ListMeta: obj.(*v1.CSIStorageCapacityList).ListMeta} + for _, item := range obj.(*v1.CSIStorageCapacityList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -77,63 +76,63 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions } // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts v1.CreateOptions) (result *storagev1.CSIStorageCapacity, err error) { +func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIStorageCapacity), err + return obj.(*v1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts v1.UpdateOptions) (result *storagev1.CSIStorageCapacity, err error) { +func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIStorageCapacity), err + return obj.(*v1.CSIStorageCapacity), err } // Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. 
-func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1.CSIStorageCapacity{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &storagev1.CSIStorageCapacityList{}) + _, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{}) return err } // Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.CSIStorageCapacity, err error) { +func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1.CSIStorageCapacity{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIStorageCapacity), err + return obj.(*v1.CSIStorageCapacity), err } // Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.CSIStorageCapacity, err error) { +func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { if cSIStorageCapacity == nil { return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") } @@ -146,10 +145,10 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &storagev1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1.CSIStorageCapacity{}) if obj == nil { return nil, err } - return obj.(*storagev1.CSIStorageCapacity), err + return obj.(*v1.CSIStorageCapacity), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 60895be5b7c8..e232f4c8d7d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeStorageClasses struct { Fake *FakeStorageV1 } -var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "storageclasses"} +var storageclassesResource = v1.SchemeGroupVersion.WithResource("storageclasses") -var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"} +var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &storagev1.StorageClass{}) + Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*storagev1.StorageClass), err + return obj.(*v1.StorageClass), err } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.StorageClassList, err error) { +func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &storagev1.StorageClassList{}) + Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1.StorageClassList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &storagev1.StorageClassList{ListMeta: obj.(*storagev1.StorageClassList).ListMeta} - for _, item := range obj.(*storagev1.StorageClassList).Items { + list := &v1.StorageClassList{ListMeta: obj.(*v1.StorageClassList).ListMeta} + for _, item := range obj.(*v1.StorageClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,58 +73,58 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (res } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.CreateOptions) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &storagev1.StorageClass{}) + Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*storagev1.StorageClass), err + return obj.(*v1.StorageClass), err } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.UpdateOptions) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &storagev1.StorageClass{}) + Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*storagev1.StorageClass), err + return obj.(*v1.StorageClass), err } // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &storagev1.StorageClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1.StorageClass{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) - _, err := c.Fake.Invokes(action, &storagev1.StorageClassList{}) + _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) return err } // Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &storagev1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*storagev1.StorageClass), err + return obj.(*v1.StorageClass), err } // Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { if storageClass == nil { return nil, fmt.Errorf("storageClass provided to Apply must not be nil") } @@ -138,9 +137,9 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *applyconfi return nil, fmt.Errorf("storageClass.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &storagev1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*storagev1.StorageClass), err + return obj.(*v1.StorageClass), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index e48943f8ef1c..3f5f2aec5784 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -23,13 +23,12 @@ import ( json "encoding/json" "fmt" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" testing "k8s.io/client-go/testing" ) @@ -38,24 +37,24 @@ type FakeVolumeAttachments struct { Fake *FakeStorageV1 } -var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "volumeattachments"} +var volumeattachmentsResource = v1.SchemeGroupVersion.WithResource("volumeattachments") -var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"} +var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment") // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.VolumeAttachmentList, err error) { +func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &storagev1.VolumeAttachmentList{}) + Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1.VolumeAttachmentList{}) if obj == nil { return nil, err } @@ -64,8 +63,8 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( if label == nil { label = labels.Everything() } - list := &storagev1.VolumeAttachmentList{ListMeta: obj.(*storagev1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*storagev1.VolumeAttachmentList).Items { + list := &v1.VolumeAttachmentList{ListMeta: obj.(*v1.VolumeAttachmentList).ListMeta} + for _, item := range obj.(*v1.VolumeAttachmentList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -74,69 +73,69 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.CreateOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1.VolumeAttachment{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) - _, err := c.Fake.Invokes(action, &storagev1.VolumeAttachmentList{}) + _, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{}) return err } // Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. 
-func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { if volumeAttachment == nil { return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") } @@ -149,16 +148,16 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *app return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { if volumeAttachment == nil { return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") } @@ -171,9 +170,9 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1.VolumeAttachment{}) if obj == nil { return nil, err } - return obj.(*storagev1.VolumeAttachment), err + return obj.(*v1.VolumeAttachment), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go index f56ed7895aec..c1614cda7dcb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" @@ -39,9 +38,9 @@ type FakeCSIStorageCapacities struct { ns string } -var csistoragecapacitiesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1alpha1", Resource: "csistoragecapacities"} +var csistoragecapacitiesResource = v1alpha1.SchemeGroupVersion.WithResource("csistoragecapacities") -var csistoragecapacitiesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "CSIStorageCapacity"} +var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity") // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. 
func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index eb6ec7983bf9..9725d6d10bab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -26,7 +26,6 @@ import ( v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" @@ -38,9 +37,9 @@ type FakeVolumeAttachments struct { Fake *FakeStorageV1alpha1 } -var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1alpha1", Resource: "volumeattachments"} +var volumeattachmentsResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattachments") -var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttachment"} +var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment") // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index c65fa74222f2..4257aa61830f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" @@ -38,9 +37,9 @@ type FakeCSIDrivers struct { Fake *FakeStorageV1beta1 } -var csidriversResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "csidrivers"} +var csidriversResource = v1beta1.SchemeGroupVersion.WithResource("csidrivers") -var csidriversKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSIDriver"} +var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. 
func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index 1ffd49b3ab26..d38c104bc166 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" @@ -38,9 +37,9 @@ type FakeCSINodes struct { Fake *FakeStorageV1beta1 } -var csinodesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "csinodes"} +var csinodesResource = v1beta1.SchemeGroupVersion.WithResource("csinodes") -var csinodesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSINode"} +var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go index 1715884d1bc0..d7bbb614b23e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" @@ -39,9 +38,9 @@ type FakeCSIStorageCapacities struct { ns string } -var csistoragecapacitiesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "csistoragecapacities"} +var csistoragecapacitiesResource = v1beta1.SchemeGroupVersion.WithResource("csistoragecapacities") -var csistoragecapacitiesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSIStorageCapacity"} +var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity") // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. 
func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index f929c55f8d4e..869e58b4f71f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" @@ -38,9 +37,9 @@ type FakeStorageClasses struct { Fake *FakeStorageV1beta1 } -var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"} +var storageclassesResource = v1beta1.SchemeGroupVersion.WithResource("storageclasses") -var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClass"} +var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 81df8776919a..e2b4a2eb1b26 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -26,7 +26,6 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" @@ -38,9 +37,9 @@ type FakeVolumeAttachments struct { Fake *FakeStorageV1beta1 } -var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "volumeattachments"} +var volumeattachmentsResource = v1beta1.SchemeGroupVersion.WithResource("volumeattachments") -var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachment"} +var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachment") // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index 5599219d9e51..4c65dbf7645c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -41,7 +41,3 @@ type NetworkPolicyListerExpansion interface{} // NetworkPolicyNamespaceListerExpansion allows custom methods to be added to // NetworkPolicyNamespaceLister. type NetworkPolicyNamespaceListerExpansion interface{} - -// PodSecurityPolicyListerExpansion allows custom methods to be added to -// PodSecurityPolicyLister. -type PodSecurityPolicyListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 5f6a8c036014..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodSecurityPolicyLister helps list PodSecurityPolicies. -// All objects returned here must be treated as read-only. -type PodSecurityPolicyLister interface { - // List lists all PodSecurityPolicies in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) - // Get retrieves the PodSecurityPolicy from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PodSecurityPolicy, error) - PodSecurityPolicyListerExpansion -} - -// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. -type podSecurityPolicyLister struct { - indexer cache.Indexer -} - -// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. -func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { - return &podSecurityPolicyLister{indexer: indexer} -} - -// List lists all PodSecurityPolicies in the indexer. -func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) - }) - return ret, err -} - -// Get retrieves the PodSecurityPolicy from the index for a given name. 
-func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) - } - return obj.(*v1beta1.PodSecurityPolicy), nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 000000000000..e61009424261 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go index 2cf821bcd7a2..60df7e568c35 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/client.go @@ -52,8 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go index 560f73f0020a..96e725692d37 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go @@ -34,6 +34,7 @@ import ( "time" "golang.org/x/net/http2" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -116,8 +117,11 @@ type Request struct { subresource string // output - err error - body io.Reader + err error + + // only one of body / bodyBytes may be set. requests using body are not retriable. 
+ body io.Reader + bodyBytes []byte retryFn requestRetryFunc } @@ -443,12 +447,15 @@ func (r *Request) Body(obj interface{}) *Request { return r } glogBody("Request Body", data) - r.body = bytes.NewReader(data) + r.body = nil + r.bodyBytes = data case []byte: glogBody("Request Body", t) - r.body = bytes.NewReader(t) + r.body = nil + r.bodyBytes = t case io.Reader: r.body = t + r.bodyBytes = nil case runtime.Object: // callers may pass typed interface pointers, therefore we must check nil with reflection if reflect.ValueOf(t).IsNil() { @@ -465,7 +472,8 @@ func (r *Request) Body(obj interface{}) *Request { return r } glogBody("Request Body", data) - r.body = bytes.NewReader(data) + r.body = nil + r.bodyBytes = data r.SetHeader("Content-Type", r.c.content.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) @@ -825,9 +833,6 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if err != nil { return nil, err } - if r.body != nil { - req.Body = io.NopCloser(r.body) - } resp, err := client.Do(req) updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) @@ -889,8 +894,20 @@ func (r *Request) requestPreflightCheck() error { } func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) { + var body io.Reader + switch { + case r.body != nil && r.bodyBytes != nil: + return nil, fmt.Errorf("cannot set both body and bodyBytes") + case r.body != nil: + body = r.body + case r.bodyBytes != nil: + // Create a new reader specifically for this request. + // Giving each request a dedicated reader allows retries to avoid races resetting the request body. + body = bytes.NewReader(r.bodyBytes) + } + url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) + req, err := http.NewRequest(r.verb, url, body) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/with_retry.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/with_retry.go index b04e3e9eff7b..207060a5cc2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/with_retry.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/with_retry.go @@ -153,6 +153,11 @@ func (r *withRetry) IsNextRetry(ctx context.Context, restReq *Request, httpReq * return false } + if restReq.body != nil { + // we have an opaque reader, we can't safely reset it + return false + } + r.attempts++ r.retryAfter = &RetryAfter{Attempt: r.attempts} if r.attempts > r.maxRetries { @@ -209,18 +214,6 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error { return nil } - // At this point we've made atleast one attempt, post which the response - // body should have been fully read and closed in order for it to be safe - // to reset the request body before we reconnect, in order for us to reuse - // the same TCP connection. - if seeker, ok := request.body.(io.Seeker); ok && request.body != nil { - if _, err := seeker.Seek(0, io.SeekStart); err != nil { - err = fmt.Errorf("failed to reset the request body while retrying a request: %v", err) - r.trackPreviousError(err) - return err - } - } - // if we are here, we have made attempt(s) at least once before. 
if request.backoff != nil { delay := request.backoff.CalculateBackoff(url) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go index 0762da3befac..466a708e9a60 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go @@ -50,11 +50,12 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration @@ -84,7 +85,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -131,15 +132,18 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } @@ -211,7 +215,7 @@ func (c *controller) processLoop() { // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -220,6 +224,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -227,7 +234,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -247,6 +254,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. 
+func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an @@ -258,11 +295,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -273,7 +310,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -413,6 +450,7 @@ func processDeltas( clientState Store, transformer TransformFunc, deltas Deltas, + isInInitialList bool, ) error { // from oldest to newest for _, d := range deltas { @@ -436,7 +474,7 @@ func processDeltas( if err := clientState.Add(obj); err != nil { return err } - handler.OnAdd(obj) + handler.OnAdd(obj, isInInitialList) } case Deleted: if err := clientState.Delete(obj); err != nil { @@ -484,9 +522,9 @@ func newInformer( FullResyncPeriod: resyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, transformer, deltas) + return processDeltas(h, clientState, transformer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 0c13a41f065b..c4f2de7b2517 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -271,6 +271,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -526,6 +530,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -551,7 +556,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { 
f.addIfNotPresent(id, item) err = e.Err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go index 8f3313783d59..dd13c4ea774a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -149,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go index 9cd476be8a00..b320b7a1ef2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -23,6 +23,7 @@ import ( "io" "math/rand" "reflect" + "strings" "sync" "time" @@ -54,7 +55,7 @@ type Reflector struct { // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -131,13 +132,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } @@ -155,7 +156,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -165,49 +199,74 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. 
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), + } + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) + } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) + } + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) } - r.setExpectedType(expectedType) + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. 
- gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil + } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -218,13 +277,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -254,7 +313,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) err := r.list(stopCh) if err != nil { @@ -326,7 +385,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return err } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) + err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) retry.After(err) if err != nil { if err != errorStopRequested { @@ -335,16 +394,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) <-r.initConnBackoffManager.Backoff().C() continue case apierrors.IsInternalError(err) && retry.ShouldRetry(): - klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err) + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -421,8 +480,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { } initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err) + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) } // We check if the list was paginated and if so set the paginatedResult based on that. @@ -635,5 +694,11 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + return false } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go index f5c7316a1d7d..2717bde589cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" @@ -132,11 +133,13 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - // It returns a registration handle for the handler that can be used to remove - // the handler again. + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). 
AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means @@ -169,6 +172,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -213,7 +220,14 @@ type SharedInformer interface { // Opaque interface representing the registration of ResourceEventHandler for // a SharedInformer. Must be supplied back to the same SharedInformer's // `RemoveEventHandler` to unregister the handlers. -type ResourceEventHandlerRegistration interface{} +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool +} // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { @@ -223,14 +237,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. +func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. 
Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -238,21 +264,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. + ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. @@ -326,12 +367,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. 
resyncCheckPeriod time.Duration @@ -381,7 +423,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -425,12 +468,13 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { }) cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, Process: s.HandleDeltas, WatchErrorHandler: s.watchErrorHandler, @@ -559,7 +603,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { return s.processor.addListener(listener), nil @@ -575,27 +619,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). + listener.add(addNotification{newObj: item, isInInitialList: true}) } return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { - return processDeltas(s, s.indexer, s.transform, deltas) + return processDeltas(s, s.indexer, s.transform, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler -func (s *sharedIndexInformer) OnAdd(obj interface{}) { +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) - s.processor.distribute(addNotification{newObj: obj}, false) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler @@ -817,6 +869,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. 
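The per-handler sync tracking added above is consumed through the handle returned by AddEventHandler. A minimal sketch of the caller side, assuming an already-constructed SharedIndexInformer named `informer`, a stop channel `stopCh`, and the ResourceEventHandlerFuncs adapter (whose one-argument AddFunc is expected to keep working with the new two-argument OnAdd); none of these names are part of this patch:

	import (
		"k8s.io/client-go/tools/cache"
		"k8s.io/klog/v2"
	)

	func waitForHandler(informer cache.SharedIndexInformer, stopCh <-chan struct{}) {
		handle, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				// Handles both items from the initial list and later adds.
			},
		})
		if err != nil {
			klog.Fatalf("failed to register event handler: %v", err)
		}
		// Unlike informer.HasSynced, handle.HasSynced only reports true once this
		// particular handler has also been delivered every item from the initial list.
		if !cache.WaitForCacheSync(stopCh, handle.HasSynced) {
			klog.Fatal("timed out waiting for the event handler to sync")
		}
	}
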
@@ -847,11 +901,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. +func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -863,6 +924,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -908,7 +972,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 000000000000..ce51da9af35b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. +type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. 
+func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 000000000000..c488b497ff1a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. +type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. +func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. 
+ if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + UpstreamHasSynced func() bool + + count int64 +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. + if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go index 951965e95eea..9f18ce45db16 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -56,9 +56,11 @@ var defaultSleepDuration = 10 * time.Second // TODO: validate impact of copying and investigate hashing type eventKey struct { + eventType string action string reason string reportingController string + reportingInstance string regarding corev1.ObjectReference related corev1.ObjectReference } @@ -184,19 +186,21 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C Count: 1, LastObservedTime: metav1.MicroTime{Time: clock.Now()}, } - return isomorphicEvent + // Make a copy of the Event to make sure that recording it + // doesn't mess with the object stored in cache. + return isomorphicEvent.DeepCopy() } e.eventCache[eventKey] = eventCopy - return eventCopy + // Make a copy of the Event to make sure that recording it doesn't + // mess with the object stored in cache. + return eventCopy.DeepCopy() }() if evToRecord != nil { - recordedEvent := e.attemptRecording(evToRecord) - if recordedEvent != nil { - recordedEventKey := getKey(recordedEvent) - e.mu.Lock() - defer e.mu.Unlock() - e.eventCache[recordedEventKey] = recordedEvent - } + // TODO: Add a metric counting the number of recording attempts + e.attemptRecording(evToRecord) + // We don't want the new recorded Event to be reflected in the + // client's cache because server-side mutations could mess with the + // aggregation mechanism used by the client. 
} }() } @@ -248,6 +252,14 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) return nil, false case *errors.StatusError: if errors.IsAlreadyExists(err) { + // If we tried to create an Event from an EventSerie, it means that + // the original Patch request failed because the Event we were + // trying to patch didn't exist. If the creation failed because the + // Event now exists, it is safe to retry. This occurs when a new + // Event is emitted twice in a very short period of time. + if isEventSeries { + return nil, true + } klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) @@ -279,9 +291,11 @@ func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) { func getKey(event *eventsv1.Event) eventKey { key := eventKey{ + eventType: event.Type, action: event.Action, reason: event.Reason, reportingController: event.ReportingController, + reportingInstance: event.ReportingInstance, regarding: event.Regarding, } if event.Related != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index c64ba9b26b0f..eb72b2517bce 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -64,9 +64,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/utils/clock" - "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -199,9 +198,7 @@ type LeaderElector struct { // stopped holding the leader lease func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() - defer func() { - le.config.Callbacks.OnStoppedLeading() - }() + defer le.config.Callbacks.OnStoppedLeading() if !le.acquire(ctx) { return // ctx signalled done @@ -263,6 +260,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. func (le *LeaderElector) renew(ctx context.Context) { + defer le.config.Lock.RecordEvent("stopped leading") ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { @@ -278,7 +276,6 @@ func (le *LeaderElector) renew(ctx context.Context) { klog.V(5).Infof("successfully renewed lease %v", desc) return } - le.config.Lock.RecordEvent("stopped leading") le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go index 81d4ff0ddff1..a2474556b0a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go @@ -101,8 +101,7 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions // It guarantees you to see all events and in the order they happened. // Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. // (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing -// -// those items. 
In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) // // The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { @@ -137,7 +136,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime. if precondition != nil { if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { - return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err()) + return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err()) } done, err := precondition(indexer) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go index dee21cf30890..b4dcb0b8490d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go @@ -21,9 +21,11 @@ import ( "crypto/ecdsa" "crypto/elliptic" cryptorand "crypto/rand" + "crypto/rsa" "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" "reflect" "sync" @@ -32,7 +34,7 @@ import ( "k8s.io/klog/v2" certificates "k8s.io/api/certificates/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -42,9 +44,76 @@ import ( "k8s.io/client-go/util/keyutil" ) -// certificateWaitTimeout controls the amount of time we wait for certificate -// approval in one iteration. -var certificateWaitTimeout = 15 * time.Minute +var ( + // certificateWaitTimeout controls the amount of time we wait for certificate + // approval in one iteration. + certificateWaitTimeout = 15 * time.Minute + + kubeletServingUsagesWithEncipherment = []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // Digital signature allows the certificate to be used to verify + // digital signatures used during TLS negotiation. + certificates.UsageDigitalSignature, + // KeyEncipherment allows the cert/key pair to be used to encrypt + // keys, including the symmetric keys negotiated during TLS setup + // and used for data transfer. + certificates.UsageKeyEncipherment, + // ServerAuth allows the cert to be used by a TLS server to + // authenticate itself to a TLS client. + certificates.UsageServerAuth, + } + kubeletServingUsagesNoEncipherment = []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // Digital signature allows the certificate to be used to verify + // digital signatures used during TLS negotiation. + certificates.UsageDigitalSignature, + // ServerAuth allows the cert to be used by a TLS server to + // authenticate itself to a TLS client. 
+ certificates.UsageServerAuth, + } + DefaultKubeletServingGetUsages = func(privateKey interface{}) []certificates.KeyUsage { + switch privateKey.(type) { + case *rsa.PrivateKey: + return kubeletServingUsagesWithEncipherment + default: + return kubeletServingUsagesNoEncipherment + } + } + kubeletClientUsagesWithEncipherment = []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // Digital signature allows the certificate to be used to verify + // digital signatures used during TLS negotiation. + certificates.UsageDigitalSignature, + // KeyEncipherment allows the cert/key pair to be used to encrypt + // keys, including the symmetric keys negotiated during TLS setup + // and used for data transfer. + certificates.UsageKeyEncipherment, + // ClientAuth allows the cert to be used by a TLS client to + // authenticate itself to the TLS server. + certificates.UsageClientAuth, + } + kubeletClientUsagesNoEncipherment = []certificates.KeyUsage{ + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // + // Digital signature allows the certificate to be used to verify + // digital signatures used during TLS negotiation. + certificates.UsageDigitalSignature, + // ClientAuth allows the cert to be used by a TLS client to + // authenticate itself to the TLS server. + certificates.UsageClientAuth, + } + DefaultKubeletClientGetUsages = func(privateKey interface{}) []certificates.KeyUsage { + switch privateKey.(type) { + case *rsa.PrivateKey: + return kubeletClientUsagesWithEncipherment + default: + return kubeletClientUsagesNoEncipherment + } + } +) // Manager maintains and updates the certificates in use by this certificate // manager. In the background it communicates with the API server to get new @@ -94,8 +163,12 @@ type Config struct { // the issued certificate is not guaranteed as the signer may choose to ignore the request. RequestedCertificateLifetime *time.Duration // Usages is the types of usages that certificates generated by the manager - // can be used for. + // can be used for. It is mutually exclusive with GetUsages. Usages []certificates.KeyUsage + // GetUsages is dynamic way to get the types of usages that certificates generated by the manager + // can be used for. If Usages is not nil, GetUsages has to be nil, vice versa. + // It is mutually exclusive with Usages. + GetUsages func(privateKey interface{}) []certificates.KeyUsage // CertificateStore is a persistent store where the current cert/key is // kept and future cert/key pairs will be persisted after they are // generated. 
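Whether the encipherment variant is requested depends only on the private key type: KeyEncipherment is meaningful for RSA keys but not for ECDSA. A standalone sketch of the new default hooks (illustrative only, not part of the patch):

	package main

	import (
		"crypto/ecdsa"
		"crypto/elliptic"
		"crypto/rand"
		"crypto/rsa"
		"fmt"

		"k8s.io/client-go/util/certificate"
	)

	func main() {
		rsaKey, _ := rsa.GenerateKey(rand.Reader, 2048)
		ecKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

		// RSA keys: DigitalSignature + KeyEncipherment + ClientAuth.
		fmt.Println(certificate.DefaultKubeletClientGetUsages(rsaKey))
		// Non-RSA keys (e.g. ECDSA): DigitalSignature + ClientAuth, no KeyEncipherment.
		fmt.Println(certificate.DefaultKubeletClientGetUsages(ecKey))
	}
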
@@ -192,7 +265,7 @@ type manager struct { dynamicTemplate bool signerName string requestedCertificateLifetime *time.Duration - usages []certificates.KeyUsage + getUsages func(privateKey interface{}) []certificates.KeyUsage forceRotation bool certStore Store @@ -235,6 +308,18 @@ func NewManager(config *Config) (Manager, error) { getTemplate = func() *x509.CertificateRequest { return config.Template } } + if config.GetUsages != nil && config.Usages != nil { + return nil, errors.New("cannot specify both GetUsages and Usages") + } + if config.GetUsages == nil && config.Usages == nil { + return nil, errors.New("either GetUsages or Usages should be specified") + } + var getUsages func(interface{}) []certificates.KeyUsage + if config.GetUsages != nil { + getUsages = config.GetUsages + } else { + getUsages = func(interface{}) []certificates.KeyUsage { return config.Usages } + } m := manager{ stopCh: make(chan struct{}), clientsetFn: config.ClientsetFn, @@ -242,7 +327,7 @@ func NewManager(config *Config) (Manager, error) { dynamicTemplate: config.GetTemplate != nil, signerName: config.SignerName, requestedCertificateLifetime: config.RequestedCertificateLifetime, - usages: config.Usages, + getUsages: getUsages, certStore: config.CertificateStore, cert: cert, forceRotation: forceRotation, @@ -256,8 +341,9 @@ func NewManager(config *Config) (Manager, error) { name = m.signerName } if len(name) == 0 { + usages := getUsages(nil) switch { - case hasKeyUsage(config.Usages, certificates.UsageClientAuth): + case hasKeyUsage(usages, certificates.UsageClientAuth): name = string(certificates.UsageClientAuth) default: name = "certificate" @@ -416,7 +502,7 @@ func getCurrentCertificateOrBootstrap( bootstrapCert.Leaf = certs[0] if _, err := store.Update(bootstrapCertificatePEM, bootstrapKeyPEM); err != nil { - utilruntime.HandleError(fmt.Errorf("Unable to set the cert/key pair to the bootstrap certificate: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to set the cert/key pair to the bootstrap certificate: %v", err)) } return &bootstrapCert, true, nil @@ -464,9 +550,14 @@ func (m *manager) rotateCerts() (bool, error) { return false, nil } + getUsages := m.getUsages + if m.getUsages == nil { + getUsages = DefaultKubeletClientGetUsages + } + usages := getUsages(privateKey) // Call the Certificate Signing Request API to get a certificate for the - // new private key. 
- reqName, reqUID, err := csr.RequestCertificate(clientSet, csrPEM, "", m.signerName, m.requestedCertificateLifetime, m.usages, privateKey) + // new private key + reqName, reqUID, err := csr.RequestCertificate(clientSet, csrPEM, "", m.signerName, m.requestedCertificateLifetime, usages, privateKey) if err != nil { utilruntime.HandleError(fmt.Errorf("%s: Failed while requesting a signed certificate from the control plane: %v", m.name, err)) if m.certificateRenewFailure != nil { @@ -622,17 +713,17 @@ func (m *manager) updateServerError(err error) error { m.certAccessLock.Lock() defer m.certAccessLock.Unlock() switch { - case errors.IsUnauthorized(err): + case apierrors.IsUnauthorized(err): // SSL terminating proxies may report this error instead of the master m.serverHealth = true - case errors.IsUnexpectedServerError(err): + case apierrors.IsUnexpectedServerError(err): // generally indicates a proxy or other load balancer problem, rather than a problem coming // from the master m.serverHealth = false default: // Identify known errors that could be expected for a cert request that // indicate everything is working normally - m.serverHealth = errors.IsNotFound(err) || errors.IsForbidden(err) + m.serverHealth = apierrors.IsNotFound(err) || apierrors.IsForbidden(err) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go index 44c62ccc03a1..7e7bf9dfab89 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go @@ -218,6 +218,11 @@ type Route struct { Name string // TargetNode is the NodeName of the target instance. TargetNode types.NodeName + // EnableNodeAddresses is a feature gate for TargetNodeAddresses. If false, ignore TargetNodeAddresses. + // Without this, if users haven't updated their cloud-provider, reconcile() will delete and create same route every time. + EnableNodeAddresses bool + // TargetNodeAddresses are the Node IPs of the target Node. + TargetNodeAddresses []v1.NodeAddress // DestinationCIDR is the CIDR format IP range that this routing rule // applies to. DestinationCIDR string diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go index 0609361762bf..e363c7db2c55 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go @@ -101,7 +101,7 @@ func RequestsOnlyLocalTraffic(service *v1.Service) bool { service.Spec.Type != v1.ServiceTypeNodePort { return false } - return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal + return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal } // NeedsHealthCheck checks if service needs health check. 
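A cloud provider that fills in the new TargetNodeAddresses field signals this by also setting EnableNodeAddresses; when the flag is false the addresses are ignored, which keeps route reconciliation stable for providers that have not been updated. A hedged sketch of a route a provider might return (e.g. from its ListRoutes implementation); the concrete values are illustrative only:

	import (
		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/types"
		cloudprovider "k8s.io/cloud-provider"
	)

	var exampleRoute = cloudprovider.Route{
		Name:            "node-a-10.244.1.0-24",
		TargetNode:      types.NodeName("node-a"),
		DestinationCIDR: "10.244.1.0/24",
		// Advertise that TargetNodeAddresses is populated; when false the
		// addresses are ignored, as documented on the field above.
		EnableNodeAddresses: true,
		TargetNodeAddresses: []v1.NodeAddress{
			{Type: v1.NodeInternalIP, Address: "10.0.0.11"},
		},
	}
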
diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/options.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/options.go index 6270b2eb96fd..9990e31fff20 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/options.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/options.go @@ -19,7 +19,9 @@ package v1 import ( "flag" "fmt" + "io" "math" + "os" "strings" "time" @@ -31,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/featuregate" + "k8s.io/component-base/logs/internal/setverbositylevel" "k8s.io/component-base/logs/klogflags" ) @@ -62,18 +65,41 @@ func NewLoggingConfiguration() *LoggingConfiguration { // The optional FeatureGate controls logging features. If nil, the default for // these features is used. func ValidateAndApply(c *LoggingConfiguration, featureGate featuregate.FeatureGate) error { - return ValidateAndApplyAsField(c, featureGate, nil) + return validateAndApply(c, nil, featureGate, nil) +} + +// ValidateAndApplyWithOptions is a variant of ValidateAndApply which accepts +// additional options beyond those that can be configured through the API. This +// is meant for testing. +func ValidateAndApplyWithOptions(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate) error { + return validateAndApply(c, options, featureGate, nil) +} + +// +k8s:deepcopy-gen=false + +// LoggingOptions can be used with ValidateAndApplyWithOptions to override +// certain global defaults. +type LoggingOptions struct { + // ErrorStream can be used to override the os.Stderr default. + ErrorStream io.Writer + + // InfoStream can be used to override the os.Stdout default. + InfoStream io.Writer } // ValidateAndApplyAsField is a variant of ValidateAndApply that should be used // when the LoggingConfiguration is embedded in some larger configuration // structure. func ValidateAndApplyAsField(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) error { + return validateAndApply(c, nil, featureGate, fldPath) +} + +func validateAndApply(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate, fldPath *field.Path) error { errs := Validate(c, featureGate, fldPath) if len(errs) > 0 { return errs.ToAggregate() } - return apply(c, featureGate) + return apply(c, options, featureGate) } // Validate can be used to check for invalid settings without applying them. 
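ValidateAndApplyWithOptions is mainly useful in tests that want to capture log output instead of writing to os.Stdout and os.Stderr. A minimal sketch against the vendored API shown here; the helper name is made up, and whether the streams are actually honored depends on the configured format's factory (formats without a factory fall back to plain klog and ignore them, see apply below):

	import (
		"bytes"

		logsapi "k8s.io/component-base/logs/api/v1"
	)

	func applyTestLogging() (*bytes.Buffer, *bytes.Buffer, error) {
		var infoBuf, errBuf bytes.Buffer
		c := logsapi.NewLoggingConfiguration()
		opts := &logsapi.LoggingOptions{
			InfoStream:  &infoBuf, // replaces the os.Stdout default
			ErrorStream: &errBuf,  // replaces the os.Stderr default
		}
		// A nil feature gate means the defaults for the logging features are used.
		if err := logsapi.ValidateAndApplyWithOptions(c, opts, nil); err != nil {
			return nil, nil, err
		}
		return &infoBuf, &errBuf, nil
	}
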
@@ -156,7 +182,7 @@ func featureEnabled(featureGate featuregate.FeatureGate, feature featuregate.Fea return enabled } -func apply(c *LoggingConfiguration, featureGate featuregate.FeatureGate) error { +func apply(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate) error { contextualLoggingEnabled := contextualLoggingDefault if featureGate != nil { contextualLoggingEnabled = featureGate.Enabled(ContextualLogging) @@ -167,8 +193,19 @@ func apply(c *LoggingConfiguration, featureGate featuregate.FeatureGate) error { if format.factory == nil { klog.ClearLogger() } else { - log, flush := format.factory.Create(*c) - klog.SetLoggerWithOptions(log, klog.ContextualLogger(contextualLoggingEnabled), klog.FlushLogger(flush)) + if options == nil { + options = &LoggingOptions{ + ErrorStream: os.Stderr, + InfoStream: os.Stdout, + } + } + log, control := format.factory.Create(*c, *options) + if control.SetVerbosityLevel != nil { + setverbositylevel.Mutex.Lock() + defer setverbositylevel.Mutex.Unlock() + setverbositylevel.Callbacks = append(setverbositylevel.Callbacks, control.SetVerbosityLevel) + } + klog.SetLoggerWithOptions(log, klog.ContextualLogger(contextualLoggingEnabled), klog.FlushLogger(control.Flush)) } if err := loggingFlags.Lookup("v").Value.Set(VerbosityLevelPflag(&c.Verbosity).String()); err != nil { return fmt.Errorf("internal error while setting klog verbosity: %v", err) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/pflags.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/pflags.go index 36a98cc81cf5..b74e132a7228 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/pflags.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/pflags.go @@ -36,6 +36,9 @@ type vmoduleConfigurationPFlag struct { // String returns the -vmodule parameter (comma-separated list of pattern=N). func (wrapper vmoduleConfigurationPFlag) String() string { + if wrapper.value == nil { + return "" + } var patterns []string for _, item := range *wrapper.value { patterns = append(patterns, fmt.Sprintf("%s=%d", item.FilePattern, item.Verbosity)) @@ -82,10 +85,16 @@ type verbosityLevelPflag struct { } func (wrapper verbosityLevelPflag) String() string { + if wrapper.value == nil { + return "0" + } return strconv.FormatInt(int64(*wrapper.value), 10) } func (wrapper verbosityLevelPflag) Get() interface{} { + if wrapper.value == nil { + return VerbosityLevel(0) + } return *wrapper.value } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go index 78bc8f8853f1..f8fc1f2cae1a 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go @@ -39,14 +39,29 @@ type logFormat struct { feature featuregate.Feature } +// +k8s:deepcopy-gen=false + +// RuntimeControl provides operations that aren't available through the normal +// Logger or LogSink API. +type RuntimeControl struct { + // Flush ensures that all in-memory data is written. + // May be nil. + Flush func() + + // SetVerbosityLevel changes the level for all Logger instances + // derived from the initial one. May be nil. + // + // The parameter is intentionally a plain uint32 instead of + // VerbosityLevel to enable implementations that don't need to import + // the API (helps avoid circular dependencies). 
+ SetVerbosityLevel func(v uint32) error +} + // LogFormatFactory provides support for a certain additional, // non-default log format. type LogFormatFactory interface { // Create returns a logger with the requested configuration. - // Returning a flush function for the logger is optional. - // If provided, the caller must ensure that it is called - // periodically (if desired) and at program exit. - Create(c LoggingConfiguration) (log logr.Logger, flush func()) + Create(c LoggingConfiguration, o LoggingOptions) (logr.Logger, RuntimeControl) } // RegisterLogFormat registers support for a new logging format. This must be called diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/internal/setverbositylevel/setverbositylevel.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/internal/setverbositylevel/setverbositylevel.go new file mode 100644 index 000000000000..c643bae9bc34 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/internal/setverbositylevel/setverbositylevel.go @@ -0,0 +1,34 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package setverbositylevel stores callbacks that will be invoked by logs.GlogLevel. +// +// This is a separate package to avoid a dependency from +// k8s.io/component-base/logs (uses the callbacks) to +// k8s.io/component-base/logs/api/v1 (adds them). Not all users of the logs +// package also use the API. +package setverbositylevel + +import ( + "sync" +) + +var ( + // Mutex controls access to the callbacks. + Mutex sync.Mutex + + Callbacks []func(v uint32) error +) diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/logs.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/logs.go index 886c154e4bf6..7fda0f9117f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/logs/logs.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/logs.go @@ -23,10 +23,12 @@ import ( "flag" "fmt" "log" + "strconv" "time" "github.com/spf13/pflag" logsapi "k8s.io/component-base/logs/api/v1" + "k8s.io/component-base/logs/internal/setverbositylevel" "k8s.io/component-base/logs/klogflags" "k8s.io/klog/v2" ) @@ -182,11 +184,26 @@ func NewLogger(prefix string) *log.Logger { return log.New(KlogWriter{}, prefix, 0) } -// GlogSetter is a setter to set glog level. +// GlogSetter modifies the verbosity threshold for the entire program. +// Some components have HTTP-based APIs for invoking this at runtime. 
func GlogSetter(val string) (string, error) { + v, err := strconv.ParseUint(val, 10, 32) + if err != nil { + return "", err + } + var level klog.Level if err := level.Set(val); err != nil { return "", fmt.Errorf("failed set klog.logging.verbosity %s: %v", val, err) } + + setverbositylevel.Mutex.Lock() + defer setverbositylevel.Mutex.Unlock() + for _, cb := range setverbositylevel.Callbacks { + if err := cb(uint32(v)); err != nil { + return "", err + } + } + return fmt.Sprintf("successfully set klog.logging.verbosity to %s", val), nil } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go index cf5bccfa7b92..b3d0dd264f53 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/metric.go @@ -97,9 +97,8 @@ func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { // 2. if the metric is manually disabled via a CLI flag. // // Disclaimer: disabling a metric via a CLI flag has higher precedence than -// -// deprecation and will override show-hidden-metrics for the explicitly -// disabled metric. +// deprecation and will override show-hidden-metrics for the explicitly +// disabled metric. func (r *lazyMetric) preprocessMetric(version semver.Version) { disabledMetricsLock.RLock() defer disabledMetricsLock.RUnlock() diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go index af7d1b8bffd8..9a7138c11f8e 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/registry.go @@ -157,6 +157,10 @@ type KubeRegistry interface { Reset() // RegisterMetaMetrics registers metrics about the number of registered metrics. RegisterMetaMetrics() + // Registerer exposes the underlying prometheus registerer + Registerer() prometheus.Registerer + // Gatherer exposes the underlying prometheus gatherer + Gatherer() prometheus.Gatherer } // kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization @@ -188,6 +192,16 @@ func (kr *kubeRegistry) Register(c Registerable) error { return nil } +// Registerer exposes the underlying prometheus.Registerer +func (kr *kubeRegistry) Registerer() prometheus.Registerer { + return kr.PromRegistry +} + +// Gatherer exposes the underlying prometheus.Gatherer +func (kr *kubeRegistry) Gatherer() prometheus.Gatherer { + return kr.PromRegistry +} + // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an // error. diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go index b525bb602acd..4a405048cf86 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/value.go @@ -60,8 +60,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues // NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. // // Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), -// -// otherwise, no stability guarantees would be offered. +// otherwise, no stability guarantees would be offered. 
func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { if m == nil { return nil diff --git a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto index 83a7ac312156..7df44f2d2ee0 100644 --- a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// To regenerate api.pb.go run hack/update-generated-runtime.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package runtime.v1; diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.pb.go index 6210014a2b5c..49c4713fb431 100644 --- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.pb.go @@ -322,25 +322,27 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 286 bytes of a gzipped FileDescriptorProto + // 308 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4f, 0x4b, 0xc3, 0x30, - 0x14, 0x5f, 0x27, 0xae, 0xec, 0x59, 0x5a, 0x08, 0x43, 0x8b, 0x27, 0xcd, 0x65, 0xea, 0xa1, 0x30, - 0xbd, 0x8b, 0x88, 0x9e, 0x44, 0x0f, 0x15, 0xbc, 0x4a, 0x56, 0x1e, 0x1a, 0xb0, 0x69, 0x4c, 0xb3, - 0xca, 0xbe, 0xa8, 0x9f, 0x47, 0x6c, 0x5e, 0x6b, 0x3a, 0x11, 0x77, 0x7c, 0x2f, 0xef, 0xf7, 0xef, - 0xbd, 0xc0, 0x54, 0x68, 0x99, 0x69, 0x53, 0xd9, 0x8a, 0x85, 0xcd, 0x62, 0x89, 0x56, 0x2c, 0xf8, - 0x19, 0xc4, 0x4f, 0x68, 0x6a, 0x59, 0xa9, 0x1c, 0xdf, 0x57, 0x58, 0x5b, 0x96, 0x42, 0xd8, 0xb8, - 0x4e, 0x1a, 0x1c, 0x05, 0x27, 0xd3, 0xbc, 0x2b, 0xf9, 0x07, 0x24, 0xfd, 0x6c, 0xad, 0x2b, 0x55, - 0xe3, 0xdf, 0xc3, 0xec, 0x18, 0x22, 0xb3, 0x52, 0x56, 0x96, 0xf8, 0xac, 0x44, 0x89, 0xe9, 0xb8, - 0x7d, 0xde, 0xa3, 0xde, 0x83, 0x28, 0x91, 0xcd, 0x21, 0xe9, 0x46, 0x3a, 0x92, 0x9d, 0x76, 0x2a, - 0xa6, 0x36, 0xa9, 0xf1, 0x6b, 0x88, 0x6f, 0xb0, 0x30, 0x6b, 0x6d, 0xff, 0x35, 0xc9, 0xf6, 0x61, - 0x52, 0x48, 0xfd, 0x8a, 0xa6, 0x55, 0x8c, 0x72, 0xaa, 0xf8, 0x1c, 0x92, 0x9e, 0x83, 0xcc, 0xcf, - 0x60, 0x57, 0xbf, 0x09, 0xe9, 0x28, 0xa2, 0xdc, 0x15, 0xfc, 0x0a, 0xe2, 0x5b, 0xb5, 0xa5, 0x58, - 0xcf, 0x30, 0xf6, 0x19, 0x4e, 0x21, 0xe9, 0x19, 0x48, 0xea, 0xc7, 0x55, 0xe0, 0xbb, 0x3a, 0xff, - 0x0c, 0x60, 0x76, 0x87, 0xeb, 0x7b, 0xa1, 0xc4, 0x0b, 0x96, 0xa8, 0xec, 0x23, 0x9a, 0x46, 0x16, - 0xc8, 0x2e, 0x21, 0xa4, 0xf4, 0xec, 0x20, 0xa3, 0x63, 0x65, 0xc3, 0x4b, 0x1d, 0xa6, 0xbf, 0x1f, - 0x9c, 0x1c, 0x1f, 0x7d, 0xe3, 0x29, 0xae, 0x87, 0x1f, 0x2e, 0xd1, 0xc3, 0x6f, 0x6c, 0xc6, 0xe1, - 0x29, 0x83, 0x87, 0x1f, 0xee, 0xc5, 0xc3, 0x6f, 0xc4, 0xe5, 0xa3, 0xe5, 0xa4, 0xfd, 0x67, 0x17, - 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x57, 0xc8, 0x65, 0x5a, 0x74, 0x02, 0x00, 0x00, + 0x14, 0x5f, 0x27, 0x6e, 0xec, 0x59, 0x5a, 0x08, 0xc3, 0x55, 0x4f, 0x9a, 0xcb, 0xd4, 0x43, 0xcb, + 0xf4, 0xe2, 0x49, 0x64, 0xe8, 0x49, 0xf4, 0x50, 0xc1, 0x83, 0x17, 0xc9, 0xca, 0x43, 0xc3, 0x6c, + 0x1a, 0x93, 0xac, 0xb2, 0x2f, 0xea, 0xe7, 0x11, 0xdb, 0xb4, 0xa6, 0x13, 0xd1, 0xe3, 0x7b, 0xf9, + 0xfd, 0x79, 0xbf, 0xf7, 0x02, 0x23, 0x26, 0x79, 0x2c, 0x55, 0x61, 0x0a, 0x32, 0x2c, 0x67, 0x0b, + 0x34, 0x6c, 0x46, 0x4f, 0x20, 0x78, 0x40, 0xa5, 0x79, 0x21, 0x52, 0x7c, 0x5b, 0xa1, 0x36, 0x24, + 0x82, 0x61, 
0x59, 0x77, 0x22, 0xef, 0xc0, 0x3b, 0x1a, 0xa5, 0x4d, 0x49, 0xdf, 0x21, 0x6c, 0xb1, + 0x5a, 0x16, 0x42, 0xe3, 0xef, 0x60, 0x72, 0x08, 0xbe, 0x5a, 0x09, 0xc3, 0x73, 0x7c, 0x12, 0x2c, + 0xc7, 0xa8, 0x5f, 0x3d, 0xef, 0xd8, 0xde, 0x1d, 0xcb, 0x91, 0x4c, 0x21, 0x6c, 0x20, 0x8d, 0xc8, + 0x56, 0x85, 0x0a, 0x6c, 0xdb, 0xba, 0xd1, 0x39, 0x04, 0x57, 0x98, 0xa9, 0xb5, 0x34, 0x7f, 0x0e, + 0x49, 0x76, 0x61, 0x90, 0x71, 0xf9, 0x82, 0xaa, 0x72, 0xf4, 0x53, 0x5b, 0xd1, 0x29, 0x84, 0xad, + 0x86, 0x1d, 0x7e, 0x0c, 0xdb, 0xf2, 0x95, 0xf1, 0x5a, 0xc2, 0x4f, 0xeb, 0x82, 0x5e, 0x42, 0x70, + 0x2d, 0xfe, 0x69, 0xd6, 0x2a, 0xf4, 0x5d, 0x85, 0x63, 0x08, 0x5b, 0x05, 0x6b, 0xf5, 0x3d, 0x95, + 0xe7, 0x4e, 0x75, 0xfa, 0xe1, 0xc1, 0xf8, 0x06, 0xd7, 0xb7, 0x4c, 0xb0, 0x67, 0xcc, 0x51, 0x98, + 0x7b, 0x54, 0x25, 0xcf, 0x90, 0x5c, 0xc0, 0xd0, 0xa6, 0x27, 0x93, 0xd8, 0x1e, 0x2b, 0xee, 0x5e, + 0x6a, 0x3f, 0xfa, 0xf9, 0x50, 0xdb, 0xd1, 0xde, 0x17, 0xdf, 0xc6, 0x75, 0xf8, 0xdd, 0x25, 0x3a, + 0xfc, 0x8d, 0xcd, 0xd4, 0x7c, 0x9b, 0xc1, 0xe1, 0x77, 0xf7, 0xe2, 0xf0, 0x37, 0xe2, 0xd2, 0xde, + 0x7c, 0xef, 0x71, 0xb2, 0x3c, 0xd7, 0x31, 0x2f, 0x92, 0x65, 0xae, 0x13, 0x26, 0xb9, 0x4e, 0x2c, + 0x78, 0x31, 0xa8, 0xbe, 0xe0, 0xd9, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0xcb, 0x8d, 0x9b, + 0x8f, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.proto b/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.proto index a9b44bf5fa10..22450edcd876 100644 --- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v1beta1/api.proto @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// To regenerate api.pb.go run hack/update-generated-kms.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1beta1; +option go_package = "k8s.io/kms/apis/v1beta1"; // This service defines the public APIs for remote KMS provider. 
service KeyManagementService { diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.pb.go index ab55bd1b3e68..d8ea552fd5b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.pb.go @@ -355,32 +355,33 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 391 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0xcf, 0x4f, 0xe2, 0x40, - 0x14, 0xc7, 0x29, 0x5d, 0x60, 0x79, 0xb0, 0x40, 0x26, 0x6c, 0xb6, 0x4b, 0x36, 0x1b, 0x32, 0x27, - 0x76, 0x0f, 0xdd, 0x2c, 0x5e, 0x8c, 0x89, 0x06, 0x13, 0x39, 0x18, 0xf4, 0x52, 0x8e, 0x1e, 0xcc, - 0x08, 0x2f, 0x76, 0x42, 0x9d, 0xd6, 0x76, 0xda, 0x58, 0xff, 0x50, 0x13, 0xff, 0x01, 0xff, 0x0e, - 0xd3, 0x76, 0xa0, 0x2d, 0x88, 0x9e, 0xbc, 0xcd, 0xfb, 0xd1, 0xef, 0xf7, 0xcd, 0x67, 0x5e, 0xa1, - 0xc9, 0x3c, 0x6e, 0x7a, 0xbe, 0x2b, 0x5d, 0xf2, 0x35, 0x1a, 0x33, 0xc7, 0xb3, 0xd9, 0x7f, 0xda, - 0x85, 0x6f, 0x73, 0xc9, 0x64, 0x18, 0x58, 0x78, 0x1f, 0x62, 0x20, 0xe9, 0x15, 0x74, 0xd6, 0x89, - 0xc0, 0x73, 0x45, 0x80, 0xc4, 0x80, 0x46, 0x84, 0x7e, 0xc0, 0x5d, 0x61, 0x68, 0x43, 0x6d, 0xd4, - 0xb4, 0xd6, 0x61, 0x52, 0xb1, 0x91, 0x39, 0xd2, 0x7e, 0x34, 0xaa, 0x59, 0x45, 0x85, 0xe4, 0x3b, - 0xd4, 0x57, 0x18, 0x5f, 0xf3, 0xa5, 0xa1, 0xa7, 0x85, 0xda, 0x0a, 0xe3, 0xf3, 0x25, 0x7d, 0xd1, - 0xa0, 0x73, 0x86, 0x0b, 0x3f, 0xf6, 0xa4, 0xf2, 0x23, 0xbf, 0x01, 0x16, 0xdc, 0xb3, 0xd1, 0x97, - 0xf8, 0x20, 0x53, 0x83, 0xb6, 0x55, 0xc8, 0x90, 0x1e, 0xe8, 0x21, 0x5f, 0x2a, 0xfd, 0xe4, 0xb8, - 0x47, 0x9b, 0xcc, 0xa0, 0xc5, 0x84, 0x70, 0x25, 0x93, 0xdc, 0x15, 0x81, 0xf1, 0x65, 0xa8, 0x8f, - 0x5a, 0xe3, 0x3f, 0xe6, 0xfa, 0xa6, 0x66, 0xd9, 0xd7, 0x3c, 0xcd, 0x7b, 0xa7, 0x42, 0xfa, 0xb1, - 0x55, 0xfc, 0x7a, 0x70, 0x02, 0xbd, 0xed, 0x86, 0x64, 0x92, 0x15, 0xc6, 0x8a, 0x41, 0x72, 0x24, - 0x7d, 0xa8, 0x45, 0xcc, 0x09, 0x31, 0x9d, 0xae, 0x6d, 0x65, 0xc1, 0x51, 0xf5, 0x50, 0xa3, 0xff, - 0xa0, 0xbb, 0xf1, 0x53, 0x18, 0x7f, 0x41, 0xd3, 0x73, 0x18, 0x17, 0x85, 0x7b, 0xe6, 0x09, 0x3a, - 0x81, 0xce, 0x54, 0x94, 0xc0, 0xbc, 0xdb, 0xbf, 0x8b, 0x85, 0x3e, 0x69, 0xd0, 0xdd, 0x48, 0x28, - 0xcf, 0x8f, 0xe0, 0xe6, 0x28, 0xab, 0x45, 0x94, 0x17, 0x65, 0x94, 0x7a, 0x8a, 0xf2, 0x6f, 0x8e, - 0x72, 0xcb, 0xe6, 0x73, 0x59, 0x8e, 0x9f, 0x35, 0xe8, 0xcf, 0x30, 0xbe, 0x64, 0x82, 0xdd, 0xe2, - 0x1d, 0x0a, 0x39, 0x47, 0x3f, 0xe2, 0x0b, 0x24, 0xc7, 0x50, 0xcf, 0x56, 0x95, 0xfc, 0xc8, 0x67, - 0x2b, 0x6d, 0xf3, 0xc0, 0xd8, 0x2d, 0x64, 0x33, 0xd3, 0x0a, 0x99, 0x40, 0x43, 0xbd, 0x11, 0x31, - 0xf6, 0xad, 0xc9, 0xe0, 0xe7, 0x1b, 0x95, 0xa2, 0x82, 0x42, 0x51, 0x54, 0x28, 0xbf, 0x63, 0x51, - 0x61, 0x8b, 0x1b, 0xad, 0xdc, 0xd4, 0xd3, 0xff, 0xf1, 0xe0, 0x35, 0x00, 0x00, 0xff, 0xff, 0xa7, - 0xdd, 0xa1, 0x79, 0x9c, 0x03, 0x00, 0x00, + // 410 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0xcf, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0x97, 0x86, 0x75, 0xf4, 0x6d, 0xb4, 0x93, 0x35, 0x84, 0x89, 0x10, 0x9a, 0x7c, 0x1a, + 0x1c, 0x12, 0x51, 0x2e, 0x13, 0x12, 0x68, 0x20, 0x76, 0x40, 0x83, 0x4b, 0x7a, 0x83, 0x03, 0x32, + 0xed, 0x13, 0xb1, 0x92, 0x3a, 0x26, 0x76, 0x22, 0xc2, 0x1f, 0x8a, 0xc4, 0x3f, 0xc0, 0xdf, 0x81, + 0x92, 0x38, 0x4d, 0xd2, 0x52, 0x38, 0xed, 0xe6, 0xf7, 0x23, 0xdf, 0xef, 0xf3, 0xc7, 0x2f, 0x30, + 0xe1, 0x4a, 0xf8, 0x2a, 0x4b, 0x4d, 0x4a, 0xee, 0x16, 0x73, 0x9e, 
0xa8, 0x88, 0x3f, 0x63, 0x33, + 0xb8, 0xb7, 0x30, 0xdc, 0xe4, 0x3a, 0xc4, 0x6f, 0x39, 0x6a, 0xc3, 0x3e, 0xc1, 0xb4, 0x4d, 0x68, + 0x95, 0x4a, 0x8d, 0x84, 0xc2, 0x51, 0x81, 0x99, 0x16, 0xa9, 0xa4, 0xce, 0xb9, 0x73, 0x31, 0x09, + 0xdb, 0xb0, 0xaa, 0x44, 0xc8, 0x13, 0x13, 0xfd, 0xa0, 0xa3, 0xa6, 0x62, 0x43, 0x72, 0x1f, 0xc6, + 0x31, 0x96, 0x9f, 0xc5, 0x8a, 0xba, 0x75, 0xe1, 0x30, 0xc6, 0xf2, 0xdd, 0x8a, 0xfd, 0x76, 0x60, + 0xfa, 0x16, 0x97, 0x59, 0xa9, 0x8c, 0xf5, 0x23, 0x8f, 0x01, 0x96, 0x42, 0x45, 0x98, 0x19, 0xfc, + 0x6e, 0x6a, 0x83, 0x93, 0xb0, 0x97, 0x21, 0xa7, 0xe0, 0xe6, 0x62, 0x65, 0xf5, 0xab, 0xe3, 0x1e, + 0x6d, 0x72, 0x03, 0xc7, 0x5c, 0xca, 0xd4, 0x70, 0x23, 0x52, 0xa9, 0xe9, 0x9d, 0x73, 0xf7, 0xe2, + 0x78, 0xfe, 0xc4, 0x6f, 0x6f, 0xea, 0x0f, 0x7d, 0xfd, 0xd7, 0x5d, 0xef, 0xb5, 0x34, 0x59, 0x19, + 0xf6, 0xbf, 0xf6, 0x5e, 0xc1, 0xe9, 0x76, 0x43, 0x35, 0x49, 0x8c, 0xa5, 0x65, 0x50, 0x1d, 0xc9, + 0x19, 0x1c, 0x16, 0x3c, 0xc9, 0xb1, 0x9e, 0xee, 0x24, 0x6c, 0x82, 0x17, 0xa3, 0x4b, 0x87, 0x05, + 0x30, 0xdb, 0xf8, 0x59, 0x8c, 0x8f, 0x60, 0xa2, 0x12, 0x2e, 0x64, 0xef, 0x9e, 0x5d, 0x82, 0x5d, + 0xc1, 0xf4, 0x5a, 0x0e, 0xc0, 0xfc, 0xb3, 0x7f, 0x17, 0x0b, 0xfb, 0xe9, 0xc0, 0x6c, 0x23, 0x61, + 0x3d, 0xff, 0x07, 0xb7, 0x43, 0x39, 0xea, 0xa3, 0x7c, 0x3f, 0x44, 0xe9, 0xd6, 0x28, 0x9f, 0x76, + 0x28, 0xb7, 0x6c, 0x6e, 0x97, 0xe5, 0xfc, 0x97, 0x03, 0x67, 0x37, 0x58, 0x7e, 0xe0, 0x92, 0x7f, + 0xc5, 0x35, 0x4a, 0xb3, 0xc0, 0xac, 0x10, 0x4b, 0x24, 0x2f, 0x61, 0xdc, 0xac, 0x2a, 0x79, 0xd0, + 0xcd, 0x36, 0xd8, 0x66, 0x8f, 0xee, 0x16, 0x9a, 0x99, 0xd9, 0x01, 0xb9, 0x82, 0x23, 0xfb, 0x46, + 0x84, 0xee, 0x5b, 0x13, 0xef, 0xe1, 0x5f, 0x2a, 0x7d, 0x05, 0x8b, 0xa2, 0xaf, 0x30, 0x7c, 0xc7, + 0xbe, 0xc2, 0x16, 0x37, 0x76, 0xf0, 0xc6, 0xfb, 0x48, 0xe3, 0x4b, 0xed, 0x8b, 0x34, 0x88, 0xd7, + 0x3a, 0xe0, 0x4a, 0xe8, 0xa0, 0xed, 0xfe, 0x32, 0xae, 0xff, 0xd5, 0xe7, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x77, 0x22, 0x41, 0x93, 0xb8, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.proto index 9b960b2acc43..095dcdddc06b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2alpha1/api.proto @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// To regenerate api.pb.go run hack/update-generated-kms.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v2alpha1; +option go_package = "k8s.io/kms/apis/v2alpha1"; // This service defines the public APIs for remote KMS provider. service KeyManagementService { diff --git a/cluster-autoscaler/vendor/k8s.io/kms/service/grpc_service.go b/cluster-autoscaler/vendor/k8s.io/kms/service/grpc_service.go new file mode 100644 index 000000000000..d717064b06b2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kms/service/grpc_service.go @@ -0,0 +1,141 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc" + + "k8s.io/klog/v2" + kmsapi "k8s.io/kms/apis/v2alpha1" +) + +// GRPCService is a gprc server that runs the kms v2 alpha 1 API. +type GRPCService struct { + addr string + timeout time.Duration + server *grpc.Server + + kmsService Service +} + +var _ kmsapi.KeyManagementServiceServer = (*GRPCService)(nil) + +// NewGRPCService creates an instance of GRPCService. +func NewGRPCService( + address string, + timeout time.Duration, + + kmsService Service, +) *GRPCService { + klog.V(4).InfoS("KMS plugin configured", "address", address, "timeout", timeout) + + return &GRPCService{ + addr: address, + timeout: timeout, + kmsService: kmsService, + } +} + +// ListenAndServe accepts incoming connections on a Unix socket. It is a blocking method. +// Returns non-nil error unless Close or Shutdown is called. +func (s *GRPCService) ListenAndServe() error { + ln, err := net.Listen("unix", s.addr) + if err != nil { + return err + } + defer ln.Close() + + gs := grpc.NewServer( + grpc.ConnectionTimeout(s.timeout), + ) + s.server = gs + + kmsapi.RegisterKeyManagementServiceServer(gs, s) + + klog.V(4).InfoS("kms plugin serving", "address", s.addr) + return gs.Serve(ln) +} + +// Shutdown performs a grafecul shutdown. Doesn't accept new connections and +// blocks until all pending RPCs are finished. +func (s *GRPCService) Shutdown() { + klog.V(4).InfoS("kms plugin shutdown", "address", s.addr) + if s.server != nil { + s.server.GracefulStop() + } +} + +// Close stops the server by closing all connections immediately and cancels +// all active RPCs. +func (s *GRPCService) Close() { + klog.V(4).InfoS("kms plugin close", "address", s.addr) + if s.server != nil { + s.server.Stop() + } +} + +// Status sends a status request to specified kms service. +func (s *GRPCService) Status(ctx context.Context, _ *kmsapi.StatusRequest) (*kmsapi.StatusResponse, error) { + res, err := s.kmsService.Status(ctx) + if err != nil { + return nil, err + } + + return &kmsapi.StatusResponse{ + Version: res.Version, + Healthz: res.Healthz, + KeyId: res.KeyID, + }, nil +} + +// Decrypt sends a decryption request to specified kms service. +func (s *GRPCService) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) { + klog.V(4).InfoS("decrypt request received", "id", req.Uid) + + plaintext, err := s.kmsService.Decrypt(ctx, req.Uid, &DecryptRequest{ + Ciphertext: req.Ciphertext, + KeyID: req.KeyId, + Annotations: req.Annotations, + }) + if err != nil { + return nil, err + } + + return &kmsapi.DecryptResponse{ + Plaintext: plaintext, + }, nil +} + +// Encrypt sends an encryption request to specified kms service. 
+func (s *GRPCService) Encrypt(ctx context.Context, req *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { + klog.V(4).InfoS("encrypt request received", "id", req.Uid) + + encRes, err := s.kmsService.Encrypt(ctx, req.Uid, req.Plaintext) + if err != nil { + return nil, err + } + + return &kmsapi.EncryptResponse{ + Ciphertext: encRes.Ciphertext, + KeyId: encRes.KeyID, + Annotations: encRes.Annotations, + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kms/service/interface.go b/cluster-autoscaler/vendor/k8s.io/kms/service/interface.go new file mode 100644 index 000000000000..fad71fa0a3c1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kms/service/interface.go @@ -0,0 +1,50 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import "context" + +// Service allows encrypting and decrypting data using an external Key Management Service. +type Service interface { + // Decrypt a given bytearray to obtain the original data as bytes. + Decrypt(ctx context.Context, uid string, req *DecryptRequest) ([]byte, error) + // Encrypt bytes to a ciphertext. + Encrypt(ctx context.Context, uid string, data []byte) (*EncryptResponse, error) + // Status returns the status of the KMS. + Status(ctx context.Context) (*StatusResponse, error) +} + +// EncryptResponse is the response from the Envelope service when encrypting data. +type EncryptResponse struct { + Ciphertext []byte + KeyID string + Annotations map[string][]byte +} + +// DecryptRequest is the request to the Envelope service when decrypting data. +type DecryptRequest struct { + Ciphertext []byte + KeyID string + Annotations map[string][]byte +} + +// StatusResponse is the response from the Envelope service when getting the status of the service. +type StatusResponse struct { + Version string + Healthz string + KeyID string +} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go index 24f2b0e8897c..1a6c12e17a5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -246,38 +246,42 @@ var schemaTypeFormatMap = map[string]typeInfo{ // the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple // type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. // Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } +// +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // // IntOrString documentation... // type IntOrString { ... 
} // // Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } +// +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } // // Implement OpenAPIDefinitionGetter for IntOrString: // -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// // ... // definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." // new -// } -// } // +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } func OpenAPITypeFormat(typeName string) (string, string) { mapped, ok := schemaTypeFormatMap[typeName] if !ok { diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go index 6755f8760c65..932b84a01f17 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go @@ -13,4 +13,3 @@ func AdaptWebServices(webServices []*restful.WebService) []common.RouteContainer } return containers } - diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 7a2590dcb912..43b358ccc478 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -18,11 +18,9 @@ package handler import ( "bytes" - "compress/gzip" "crypto/sha512" "encoding/json" "fmt" - "mime" "net/http" "strconv" "sync" @@ -41,15 +39,6 @@ import ( "k8s.io/kube-openapi/pkg/validation/spec" ) -const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv2@68f4ded+protobuf" - mimePbGz = "application/x-gzip" -) - func computeETag(data []byte) string { if data == nil { return "" @@ -70,12 +59,6 @@ type OpenAPIService struct { etagCache handler.HandlerCache } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) -} - // NewOpenAPIService builds an OpenAPIService starting with the given spec. func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { o := &OpenAPIService{} @@ -146,14 +129,6 @@ func ToProtoBinary(json []byte) ([]byte, error) { return proto.Marshal(document) } -func toGzip(data []byte) []byte { - var buf bytes.Buffer - zw := gzip.NewWriter(&buf) - zw.Write(data) - zw.Close() - return buf.Bytes() -} - // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. // // Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead. 
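Note on the KMS hunks earlier in this patch: the newly vendored k8s.io/kms/service package is the plugin-facing surface of the v2alpha1 API, with the Service interface (interface.go) supplying Encrypt/Decrypt/Status and GRPCService (grpc_service.go) exposing it over a Unix socket. The sketch below shows how the two pieces are meant to fit together; it is illustrative only and not part of the patch. The main package, the socket path, and the toy xorService type are assumptions, and a real plugin would delegate to an external KMS rather than XOR-ing bytes.

package main

import (
	"context"
	"log"
	"time"

	"k8s.io/kms/service" // vendored here as cluster-autoscaler/vendor/k8s.io/kms/service
)

// xorService is a deliberately trivial, insecure Service implementation used
// only to show the shape of the interface from interface.go.
type xorService struct{ key byte }

func (s *xorService) Encrypt(_ context.Context, uid string, data []byte) (*service.EncryptResponse, error) {
	out := make([]byte, len(data))
	for i, b := range data {
		out[i] = b ^ s.key
	}
	return &service.EncryptResponse{Ciphertext: out, KeyID: "toy-key-1"}, nil
}

func (s *xorService) Decrypt(_ context.Context, uid string, req *service.DecryptRequest) ([]byte, error) {
	out := make([]byte, len(req.Ciphertext))
	for i, b := range req.Ciphertext {
		out[i] = b ^ s.key
	}
	return out, nil
}

func (s *xorService) Status(_ context.Context) (*service.StatusResponse, error) {
	return &service.StatusResponse{Version: "v2alpha1", Healthz: "ok", KeyID: "toy-key-1"}, nil
}

func main() {
	// NewGRPCService and ListenAndServe come from the vendored grpc_service.go:
	// the plugin listens on a Unix socket and serves the v2alpha1 KMS API.
	srv := service.NewGRPCService("/tmp/kms.sock", 5*time.Second, &xorService{key: 0x42})
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}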
diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index ec4adcdec2aa..e8fbcd1d3762 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -21,7 +21,6 @@ import ( "crypto/sha512" "encoding/json" "fmt" - "mime" "net/http" "net/url" "path" @@ -41,13 +40,6 @@ import ( ) const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" - mimePbGz = "application/x-gzip" - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" subTypeJSON = "json" ) @@ -84,12 +76,6 @@ type OpenAPIV3Group struct { etagCache handler.HandlerCache } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) -} - func computeETag(data []byte) string { if data == nil { return "" @@ -191,6 +177,8 @@ func ToV3ProtoBinary(json []byte) ([]byte, error) { func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { data, _ := o.getGroupBytes() + w.Header().Set("Etag", strconv.Quote(computeETag(data))) + w.Header().Set("Content-Type", "application/json") http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go index ba4af4b7b175..e4eefa3de9f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -8,8 +8,7 @@ // primitive data types such as booleans, strings, and numbers, // in addition to structured data types such as objects and arrays. // -// -// Terminology +// # Terminology // // This package uses the terms "encode" and "decode" for syntactic functionality // that is concerned with processing JSON based on its grammar, and @@ -32,8 +31,7 @@ // // See RFC 8259 for more information. // -// -// Specifications +// # Specifications // // Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, // and RFC 8785. Each RFC is generally a stricter subset of another RFC. @@ -60,8 +58,7 @@ // In particular, it makes specific choices about behavior that RFC 8259 // leaves as undefined in order to ensure greater interoperability. // -// -// JSON Representation of Go structs +// # JSON Representation of Go structs // // A Go struct is naturally represented as a JSON object, // where each Go struct field corresponds with a JSON object member. 
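Note on the handler3.go hunk above: HandleDiscovery now sets a quoted Etag and an explicit Content-Type before handing the group discovery document to http.ServeContent, so net/http can answer If-None-Match revalidations with 304 Not Modified instead of re-sending the body. A minimal, self-contained sketch of that pattern follows; the handler wiring and payload are illustrative, and the hash encoding is assumed to mirror the package's computeETag helper (hex-encoded SHA-512).

package main

import (
	"bytes"
	"crypto/sha512"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// computeETag derives a strong validator from the payload bytes, assumed to
// match the helper kept in handler.go/handler3.go.
func computeETag(data []byte) string {
	if data == nil {
		return ""
	}
	return fmt.Sprintf("%X", sha512.Sum512(data))
}

func main() {
	discovery := []byte(`{"paths":{}}`) // illustrative discovery payload

	http.HandleFunc("/openapi/v3", func(w http.ResponseWriter, r *http.Request) {
		// Setting Etag before ServeContent lets net/http evaluate
		// If-None-Match and reply 304 when the client copy is current.
		w.Header().Set("Etag", strconv.Quote(computeETag(discovery)))
		w.Header().Set("Content-Type", "application/json")
		http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(discovery))
	})

	_ = http.ListenAndServe(":8080", nil)
}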
diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 0f5ab983cc9c..84e21d7232ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Example https://swagger.io/specification/#example-object diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 117113e7a79e..065f4887bf3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,8 +18,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) type ExternalDocumentation struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 828fd8dc56a0..d502a465c394 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -56,7 +56,7 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { // Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` + Schema *spec.Schema `json:"schema,omitempty"` // Example of the media type Example interface{} `json:"example,omitempty"` // Examples of the media type. 
Each example object should match the media type and specific schema if present diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index de8aa46021c0..9eae3994d3be 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index bc48c504deb6..4a0cae2a4cd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -20,8 +20,8 @@ import ( "encoding/json" "strings" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 0adc628266b8..c00c043bd060 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index ccd73369f752..9be76656086f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -20,8 +20,8 @@ import ( "encoding/json" "strconv" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Responses holds the list of possible responses as they are returned from executing this operation @@ -149,7 +149,6 @@ type ResponseProps struct { Links map[string]*Link `json:"links,omitempty"` } - // Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object type Link struct { spec.Refable diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go index 0ce8924efd7e..683b4de80624 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + 
"k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index 9b1352f4e3f4..edf7e6de3f69 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index a505fb2218b5..77104dff3763 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,9 +18,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" - + "k8s.io/kube-openapi/pkg/validation/spec" ) type Server struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index a3f476d5d850..519dcf2ebaee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e switch s.GetType() { case object: for _, extension := range s.GetSpecificationExtension() { - if extension.Name == "x-kuberentes-group-version-kind" { + if extension.Name == "x-kubernetes-group-version-kind" { // Objects with x-kubernetes-group-version-kind are always top // level types. return d.parseV3Kind(s, path) @@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) { func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { if s == nil { - return nil, fmt.Errorf("cannot initializae BaseSchema from nil") + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") } def, err := parseV3Interface(s.GetDefault().ToRawInfo()) diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go index 0be06989e669..6eee935b22a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -30,6 +30,7 @@ import ( // in GetCanonicalTypeName means Go type names with full package path. // // Examples of REST friendly OpenAPI name: +// // Input: k8s.io/api/core/v1.Pod // Output: io.k8s.api.core.v1.Pod // @@ -45,6 +46,7 @@ func ToCanonicalName(name string) string { // ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name. 
// // Examples of REST friendly OpenAPI name: +// // Input: k8s.io/api/core/v1.Pod // Output: io.k8s.api.core.v1.Pod // @@ -71,18 +73,21 @@ func ToRESTFriendlyName(name string) string { // OpenAPI canonical names are Go type names with full package path, for uniquely indentifying // a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/" // should be used. For custom resource definition (CRD), the canonical name is expected to be -// group/version.kind +// +// group/version.kind // // Examples of canonical name: -// Go type: k8s.io/kubernetes/pkg/apis/core.Pod -// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// +// Go type: k8s.io/kubernetes/pkg/apis/core.Pod +// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo // // Example for vendored Go type: -// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod // -// Original full path: vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod +// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod +// +// Original full path: vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod type OpenAPICanonicalTypeNamer interface { OpenAPICanonicalTypeName() string } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 7c63d440aa12..6699fc4c6e87 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -114,7 +114,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco p.Paths[k] = pi default: _, err := dec.ReadValue() // skip value - return err + if err != nil { + return err + } } } default: diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go index 2698a47696f2..3be0d9ce1931 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go @@ -52,6 +52,10 @@ type InterPodAffinityArgs struct { // HardPodAffinityWeight is the scoring weight for existing pods with a // matching hard affinity to the incoming pod. HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` + + // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity + // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. + IgnorePreferredTermsOfExistingPods bool `json:"ignorePreferredTermsOfExistingPods"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go index 3ab964f42002..602db995df2d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go @@ -52,6 +52,10 @@ type InterPodAffinityArgs struct { // HardPodAffinityWeight is the scoring weight for existing pods with a // matching hard affinity to the incoming pod. 
HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` + + // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity + // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. + IgnorePreferredTermsOfExistingPods bool `json:"ignorePreferredTermsOfExistingPods"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go index 725cd1b92d4d..acc0089c2d2e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go @@ -52,6 +52,10 @@ type InterPodAffinityArgs struct { // HardPodAffinityWeight is the scoring weight for existing pods with a // matching hard affinity to the incoming pod. HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` + + // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity + // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. + IgnorePreferredTermsOfExistingPods bool `json:"ignorePreferredTermsOfExistingPods"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go index 3fab1abbe9cd..79bca2b3459f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go @@ -803,6 +803,18 @@ type KubeletConfiguration struct { // Default: true // +optional LocalStorageCapacityIsolation *bool `json:"localStorageCapacityIsolation,omitempty"` + + // ContainerRuntimeEndpoint is the endpoint of container runtime. + // Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. + // Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime' + ContainerRuntimeEndpoint string `json:"containerRuntimeEndpoint"` + + // ImageServiceEndpoint is the endpoint of container image service. + // Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. + // Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'. + // If not specified, the value in containerRuntimeEndpoint is used. 
+ // +optional + ImageServiceEndpoint string `json:"imageServiceEndpoint,omitempty"` } type KubeletAuthorizationMode string diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go index 4f183239ca4a..a24d42881f38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go @@ -1101,71 +1101,73 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 1014 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x5f, 0x6f, 0x1b, 0x45, - 0x10, 0xcf, 0xc5, 0x4d, 0x62, 0x8f, 0x5d, 0x27, 0xdd, 0x84, 0xc8, 0xb9, 0x04, 0x37, 0xdd, 0x14, - 0x1a, 0xa4, 0xc6, 0x21, 0x2e, 0x6a, 0x11, 0x0f, 0x08, 0x17, 0x07, 0xb0, 0x42, 0x53, 0xeb, 0x42, - 0xc5, 0x03, 0x88, 0xd3, 0xf9, 0x6e, 0x63, 0x9f, 0x38, 0xef, 0x1e, 0xb7, 0x6b, 0x4b, 0xae, 0x84, - 0xc4, 0x03, 0x1f, 0xa0, 0xdf, 0x01, 0xbe, 0x02, 0xdf, 0xa1, 0x8f, 0x3c, 0xf2, 0x84, 0xa8, 0xf9, - 0x22, 0xe8, 0x76, 0xef, 0x9f, 0x2e, 0x17, 0x2b, 0x95, 0x78, 0xf3, 0xce, 0xcc, 0x6f, 0xfe, 0xfc, - 0x66, 0x3c, 0x73, 0x50, 0xb1, 0x7c, 0xb7, 0xe5, 0x07, 0x4c, 0x30, 0xb4, 0x36, 0x3d, 0x19, 0x10, - 0x61, 0x9d, 0xe8, 0x47, 0x43, 0x57, 0x8c, 0x26, 0x83, 0x96, 0xcd, 0xc6, 0xc7, 0x43, 0x36, 0x64, - 0xc7, 0x52, 0x3f, 0x98, 0x5c, 0xca, 0x97, 0x7c, 0xc8, 0x5f, 0x0a, 0x87, 0x5f, 0x69, 0xb0, 0xd9, - 0x25, 0x53, 0xd7, 0x26, 0x7d, 0x6f, 0x32, 0x74, 0xe9, 0x73, 0x5f, 0xb8, 0x8c, 0x72, 0xf4, 0x10, - 0x90, 0x1f, 0x10, 0x93, 0x0b, 0x2b, 0x10, 0x66, 0x40, 0x7e, 0x9a, 0xb8, 0x01, 0x71, 0x1a, 0xda, - 0xbe, 0x76, 0x58, 0x36, 0x36, 0xfc, 0x80, 0x5c, 0x84, 0x0a, 0x23, 0x92, 0xa3, 0x33, 0xc0, 0x43, - 0x22, 0x4c, 0x3f, 0x20, 0x97, 0x24, 0x08, 0x88, 0x63, 0x5a, 0x9e, 0xc7, 0x6c, 0x2b, 0x74, 0x65, - 0x5a, 0x53, 0xcb, 0xf5, 0xac, 0x81, 0x47, 0x1a, 0xcb, 0x12, 0x7d, 0x77, 0x48, 0x44, 0x3f, 0x36, - 0xec, 0x24, 0x76, 0x9d, 0xd8, 0x0c, 0xff, 0xae, 0xc1, 0xba, 0x41, 0x86, 0x2e, 0x17, 0x24, 0x08, - 0x23, 0x10, 0x2e, 0x50, 0x03, 0xd6, 0xa6, 0x24, 0xe0, 0x2e, 0xa3, 0x32, 0x87, 0x8a, 0x11, 0x3f, - 0x91, 0x0e, 0x65, 0x42, 0x1d, 0x9f, 0xb9, 0x54, 0xc8, 0x00, 0x15, 0x23, 0x79, 0xa3, 0x03, 0xb8, - 0x1d, 0x10, 0xce, 0x26, 0x81, 0x4d, 0x4c, 0x6a, 0x8d, 0x49, 0xa3, 0x24, 0x0d, 0x6a, 0xb1, 0xf0, - 0xdc, 0x1a, 0x13, 0xf4, 0x18, 0xd6, 0x98, 0x2a, 0xba, 0x71, 0x6b, 0x5f, 0x3b, 0xac, 0xb6, 0xf7, - 0x5a, 0x11, 0x97, 0xad, 0x02, 0x62, 0x8c, 0xd8, 0x18, 0xaf, 0xc1, 0xca, 0xe9, 0xd8, 0x17, 0x33, - 0xdc, 0x81, 0xad, 0xaf, 0x5d, 0x2e, 0x3a, 0xd4, 0xf9, 0xd6, 0x12, 0xf6, 0xc8, 0x20, 0xdc, 0x67, - 0x94, 0x13, 0xf4, 0x01, 0xac, 0x39, 0xd2, 0x01, 0x6f, 0x68, 0xfb, 0xa5, 0xc3, 0x6a, 0x7b, 0x3d, - 0xe7, 0xd8, 0x88, 0xf5, 0xf8, 0x09, 0xd4, 0xbe, 0x61, 0x3e, 0xf3, 0xd8, 0x70, 0xd6, 0xa3, 0x97, - 0x0c, 0x3d, 0x80, 0x15, 0xca, 0x9c, 0x04, 0x78, 0x27, 0x01, 0x9e, 0xbf, 0x78, 0xd6, 0x39, 0x67, - 0x0e, 0x31, 0x94, 0x1e, 0xeb, 0x50, 0x8e, 0x45, 0xa8, 0x0e, 0xcb, 0xbd, 0xae, 0xa4, 0xa7, 0x64, - 0x2c, 0xf7, 0xba, 0xd8, 0x86, 0x55, 0x15, 0x27, 0xa3, 0xa9, 0x84, 0x1a, 0xb4, 0x0d, 0xab, 0x23, - 0x62, 0x79, 0x62, 0x14, 0x31, 0x16, 0xbd, 0xd0, 0x09, 0x94, 0x45, 0x94, 0x86, 0xa4, 0xaa, 0xda, - 0x7e, 0x27, 0x89, 0x9c, 0xcd, 0xcf, 0x48, 0xcc, 0xf0, 0x19, 0x34, 0xfa, 0xd1, 0x34, 0x7c, 0xce, - 0xa8, 0xb0, 0x5c, 0x9a, 0x36, 0xed, 0x18, 0xaa, 0x51, 0x81, 0xa6, 0xeb, 0xa8, 
0x5a, 0x2a, 0x4f, - 0xeb, 0xf3, 0xbf, 0xef, 0x82, 0xca, 0x8b, 0xf7, 0xba, 0xdc, 0x80, 0xc8, 0xa4, 0xe7, 0x70, 0xbc, - 0x0b, 0x3b, 0x05, 0xce, 0x14, 0x9d, 0x78, 0x06, 0x7a, 0xc1, 0xd8, 0xc4, 0xb1, 0xbe, 0x03, 0x64, - 0xc7, 0x10, 0x39, 0xaf, 0x84, 0x8b, 0x98, 0xbe, 0x87, 0x49, 0x11, 0x89, 0xd7, 0xeb, 0x3d, 0x19, - 0x77, 0xec, 0x5c, 0x1d, 0x1c, 0xff, 0xa1, 0xc1, 0xc1, 0x0d, 0xa0, 0xe8, 0x18, 0x36, 0x93, 0x69, - 0x37, 0x55, 0x5d, 0xbd, 0x6e, 0x54, 0xb8, 0x81, 0x12, 0x55, 0x37, 0xd6, 0xa0, 0x8f, 0x60, 0x7b, - 0x3c, 0xe1, 0xc2, 0x74, 0xa9, 0xed, 0x4d, 0x9c, 0x2c, 0x66, 0x59, 0x62, 0xb6, 0x42, 0x6d, 0x4f, - 0x29, 0x53, 0xd4, 0x03, 0x58, 0xcf, 0xfc, 0xbf, 0xb8, 0xfb, 0x52, 0x0d, 0xf6, 0x8a, 0x51, 0x4f, - 0xc5, 0x17, 0xee, 0x4b, 0x82, 0x7f, 0x86, 0xdd, 0xc2, 0x6c, 0xa3, 0x01, 0xfd, 0x01, 0x36, 0xb3, - 0x9c, 0x29, 0x69, 0x4c, 0xda, 0xd1, 0x0d, 0x49, 0x53, 0x28, 0x03, 0xd9, 0xf9, 0x86, 0x71, 0xdc, - 0x85, 0xfb, 0x37, 0xc1, 0xa2, 0x3d, 0xa8, 0xe4, 0xc9, 0x4a, 0x05, 0xd8, 0x86, 0xf5, 0x08, 0x43, - 0x62, 0x9e, 0xfb, 0x0b, 0x9a, 0x7d, 0xef, 0x6a, 0xde, 0x39, 0x78, 0x51, 0x87, 0xcf, 0xa0, 0x71, - 0x9d, 0xf9, 0xdb, 0x8f, 0xf1, 0x10, 0x36, 0x52, 0x1f, 0x51, 0x8d, 0x17, 0x8b, 0xb8, 0xc6, 0x8b, - 0x72, 0x5e, 0x40, 0xf0, 0xaf, 0x25, 0xd8, 0xb9, 0x16, 0x81, 0x3e, 0x83, 0x5b, 0x84, 0x4e, 0x17, - 0xfc, 0x09, 0xf2, 0x88, 0xd6, 0x29, 0x9d, 0xf2, 0x53, 0x2a, 0x82, 0x99, 0x21, 0x91, 0xe8, 0x7d, - 0x58, 0x1d, 0xb3, 0x09, 0x15, 0x6a, 0x1c, 0xab, 0xed, 0x7a, 0xe2, 0xe3, 0x59, 0x28, 0x36, 0x22, - 0x2d, 0x3a, 0x4a, 0x37, 0x5d, 0x49, 0x1a, 0x6e, 0xe6, 0x36, 0xdd, 0x85, 0x4f, 0xec, 0x64, 0xdb, - 0xa1, 0x17, 0x50, 0xb5, 0x28, 0x65, 0xc2, 0x8a, 0xb7, 0x6e, 0x08, 0x79, 0x74, 0x83, 0xfc, 0x3a, - 0x29, 0x4a, 0xa5, 0x99, 0xf5, 0xa3, 0x3f, 0x81, 0x4a, 0x52, 0x00, 0xda, 0x80, 0xd2, 0x8f, 0x64, - 0x16, 0xed, 0xbc, 0xf0, 0x27, 0xda, 0x82, 0x95, 0xa9, 0xe5, 0x4d, 0x48, 0xb4, 0xf3, 0xd4, 0xe3, - 0x93, 0xe5, 0x8f, 0x35, 0xfd, 0x53, 0xd8, 0xc8, 0x7b, 0x7e, 0x1b, 0x3c, 0x1e, 0xc1, 0x8a, 0xe4, - 0x03, 0xbd, 0x07, 0xf5, 0xb4, 0xc9, 0xbe, 0x25, 0x46, 0x11, 0xfe, 0x76, 0x22, 0xed, 0x5b, 0x62, - 0x84, 0x76, 0xa1, 0x32, 0x62, 0x5c, 0x28, 0x8b, 0xe8, 0x66, 0x85, 0x82, 0x58, 0x19, 0x10, 0xcb, - 0x31, 0x19, 0xf5, 0xd4, 0x12, 0x2e, 0x1b, 0xe5, 0x50, 0xf0, 0x9c, 0x7a, 0x33, 0x1c, 0x00, 0xa4, - 0x84, 0xfe, 0x2f, 0xe1, 0xf6, 0xa1, 0xea, 0x93, 0x60, 0xec, 0x72, 0x2e, 0x7b, 0xa1, 0x0e, 0x64, - 0x56, 0xd4, 0xfe, 0x02, 0x6a, 0xea, 0x1a, 0x07, 0x92, 0x1f, 0xf4, 0x18, 0xca, 0xf1, 0x75, 0x46, - 0x8d, 0xa4, 0x69, 0xb9, 0x83, 0xad, 0xa7, 0xa3, 0xa2, 0x8e, 0xe4, 0x52, 0xfb, 0xb7, 0x12, 0xd4, - 0xb2, 0x07, 0x15, 0x7d, 0x05, 0xdb, 0x5f, 0x12, 0x51, 0xf4, 0xf1, 0x91, 0x03, 0xeb, 0x0b, 0x2f, - 0x32, 0x5e, 0x42, 0x1d, 0xa8, 0x65, 0x2f, 0xf0, 0x15, 0xfc, 0xbb, 0xc9, 0xbb, 0xe8, 0x50, 0xe3, - 0xa5, 0x0f, 0x35, 0x44, 0x64, 0x32, 0x05, 0x5b, 0x0a, 0x1d, 0x24, 0xe0, 0xeb, 0x37, 0xbf, 0x7e, - 0x7f, 0xb1, 0x51, 0x1c, 0x08, 0x75, 0xa0, 0x1c, 0x4f, 0x75, 0x86, 0xbc, 0xdc, 0xc6, 0xd1, 0x77, - 0x0a, 0x34, 0x89, 0x8b, 0xef, 0xe1, 0xce, 0x95, 0x23, 0x89, 0xee, 0x65, 0xe3, 0x17, 0x5e, 0x63, - 0x1d, 0x2f, 0x32, 0x89, 0xbd, 0x3f, 0xdd, 0x7b, 0xfd, 0xa6, 0xa9, 0xfd, 0xf5, 0xa6, 0xb9, 0xf4, - 0xcb, 0xbc, 0xa9, 0xbd, 0x9e, 0x37, 0xb5, 0x3f, 0xe7, 0x4d, 0xed, 0x9f, 0x79, 0x53, 0x7b, 0xf5, - 0x6f, 0x73, 0x69, 0xb0, 0x2a, 0x3f, 0x1a, 0x1f, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x2d, - 0xfa, 0x93, 0x79, 0x0a, 0x00, 0x00, + // 1044 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xce, 0xc6, 
0x4d, 0x62, 0x1f, 0xa7, 0xf9, 0x99, 0x84, 0xc8, 0xd9, 0x14, 0x37, 0x9d, 0x14, + 0x1a, 0xa4, 0xc4, 0x26, 0x2e, 0x6a, 0x2b, 0x2e, 0x10, 0x2e, 0x0e, 0x60, 0x42, 0xd3, 0x68, 0x43, + 0x85, 0x04, 0x08, 0x6b, 0xbc, 0x3b, 0xb1, 0x57, 0x59, 0xcf, 0x2c, 0x3b, 0x63, 0x4b, 0xae, 0x84, + 0xc4, 0x05, 0x0f, 0xd0, 0x77, 0x80, 0x57, 0xe0, 0x1d, 0x7a, 0xc9, 0x25, 0x57, 0x88, 0x9a, 0x17, + 0x41, 0x9e, 0xd9, 0x3f, 0x6d, 0x36, 0x56, 0x2a, 0x71, 0xe7, 0x39, 0xe7, 0x7c, 0xe7, 0xe7, 0x3b, + 0x67, 0xcf, 0x31, 0x94, 0x88, 0xef, 0xd6, 0xfc, 0x80, 0x4b, 0x8e, 0x96, 0x46, 0x47, 0x5d, 0x2a, + 0xc9, 0x91, 0x79, 0xd8, 0x73, 0x65, 0x7f, 0xd8, 0xad, 0xd9, 0x7c, 0x50, 0xef, 0xf1, 0x1e, 0xaf, + 0x2b, 0x7d, 0x77, 0x78, 0xa1, 0x5e, 0xea, 0xa1, 0x7e, 0x69, 0x1c, 0x7e, 0x65, 0xc0, 0x46, 0x8b, + 0x8e, 0x5c, 0x9b, 0x9e, 0x79, 0xc3, 0x9e, 0xcb, 0x9e, 0xfb, 0xd2, 0xe5, 0x4c, 0xa0, 0x03, 0x40, + 0x7e, 0x40, 0x3b, 0x42, 0x92, 0x40, 0x76, 0x02, 0xfa, 0xd3, 0xd0, 0x0d, 0xa8, 0x53, 0x31, 0x76, + 0x8d, 0xfd, 0xa2, 0xb5, 0xe6, 0x07, 0xf4, 0x7c, 0xaa, 0xb0, 0x42, 0x39, 0x3a, 0x01, 0xdc, 0xa3, + 0xb2, 0xe3, 0x07, 0xf4, 0x82, 0x06, 0x01, 0x75, 0x3a, 0xc4, 0xf3, 0xb8, 0x4d, 0xa6, 0xae, 0x3a, + 0x64, 0x44, 0x5c, 0x8f, 0x74, 0x3d, 0x5a, 0x99, 0x57, 0xe8, 0xbb, 0x3d, 0x2a, 0xcf, 0x22, 0xc3, + 0x66, 0x6c, 0xd7, 0x8c, 0xcc, 0xf0, 0xef, 0x06, 0xac, 0x5a, 0xb4, 0xe7, 0x0a, 0x49, 0x83, 0x69, + 0x04, 0x2a, 0x24, 0xaa, 0xc0, 0xd2, 0x88, 0x06, 0xc2, 0xe5, 0x4c, 0xe5, 0x50, 0xb2, 0xa2, 0x27, + 0x32, 0xa1, 0x48, 0x99, 0xe3, 0x73, 0x97, 0x49, 0x15, 0xa0, 0x64, 0xc5, 0x6f, 0xb4, 0x07, 0xb7, + 0x03, 0x2a, 0xf8, 0x30, 0xb0, 0x69, 0x87, 0x91, 0x01, 0xad, 0x14, 0x94, 0xc1, 0x72, 0x24, 0x3c, + 0x25, 0x03, 0x8a, 0x1e, 0xc1, 0x12, 0xd7, 0x45, 0x57, 0x6e, 0xed, 0x1a, 0xfb, 0xe5, 0xc6, 0x9d, + 0x5a, 0xc8, 0x65, 0x2d, 0x87, 0x18, 0x2b, 0x32, 0xc6, 0x4b, 0xb0, 0x70, 0x3c, 0xf0, 0xe5, 0x18, + 0x37, 0x61, 0xf3, 0x6b, 0x57, 0xc8, 0x26, 0x73, 0xbe, 0x25, 0xd2, 0xee, 0x5b, 0x54, 0xf8, 0x9c, + 0x09, 0x8a, 0x3e, 0x80, 0x25, 0x47, 0x39, 0x10, 0x15, 0x63, 0xb7, 0xb0, 0x5f, 0x6e, 0xac, 0x66, + 0x1c, 0x5b, 0x91, 0x1e, 0x3f, 0x86, 0xe5, 0x6f, 0xb8, 0xcf, 0x3d, 0xde, 0x1b, 0xb7, 0xd9, 0x05, + 0x47, 0x0f, 0x60, 0x81, 0x71, 0x27, 0x06, 0xae, 0xc7, 0xc0, 0xd3, 0x17, 0xcf, 0x9a, 0xa7, 0xdc, + 0xa1, 0x96, 0xd6, 0x63, 0x13, 0x8a, 0x91, 0x08, 0xad, 0xc0, 0x7c, 0xbb, 0xa5, 0xe8, 0x29, 0x58, + 0xf3, 0xed, 0x16, 0xb6, 0x61, 0x51, 0xc7, 0x49, 0x69, 0x4a, 0x53, 0x0d, 0xda, 0x82, 0xc5, 0x3e, + 0x25, 0x9e, 0xec, 0x87, 0x8c, 0x85, 0x2f, 0x74, 0x04, 0x45, 0x19, 0xa6, 0xa1, 0xa8, 0x2a, 0x37, + 0xde, 0x89, 0x23, 0xa7, 0xf3, 0xb3, 0x62, 0x33, 0x7c, 0x02, 0x95, 0xb3, 0x70, 0x1a, 0x3e, 0xe3, + 0x4c, 0x12, 0x97, 0x25, 0x4d, 0xab, 0x43, 0x39, 0x2c, 0xb0, 0xe3, 0x3a, 0xba, 0x96, 0xd2, 0xd3, + 0x95, 0xc9, 0xdf, 0x77, 0x41, 0xe7, 0x25, 0xda, 0x2d, 0x61, 0x41, 0x68, 0xd2, 0x76, 0x04, 0xde, + 0x81, 0xed, 0x1c, 0x67, 0x9a, 0x4e, 0x3c, 0x06, 0x33, 0x67, 0x6c, 0xa2, 0x58, 0xdf, 0x03, 0xb2, + 0x23, 0x88, 0x9a, 0x57, 0x2a, 0x64, 0x44, 0xdf, 0x41, 0x5c, 0x44, 0xec, 0xf5, 0x7a, 0x4f, 0xd6, + 0xba, 0x9d, 0xa9, 0x43, 0xe0, 0x3f, 0x0c, 0xd8, 0xbb, 0x01, 0x14, 0xd5, 0x61, 0x23, 0x9e, 0xf6, + 0x8e, 0xae, 0xab, 0xdd, 0x0a, 0x0b, 0xb7, 0x50, 0xac, 0x6a, 0x45, 0x1a, 0xf4, 0x11, 0x6c, 0x0d, + 0x86, 0x42, 0x76, 0x5c, 0x66, 0x7b, 0x43, 0x27, 0x8d, 0x99, 0x57, 0x98, 0xcd, 0xa9, 0xb6, 0xad, + 0x95, 0x09, 0xea, 0x01, 0xac, 0xa6, 0xbe, 0x2f, 0xe1, 0xbe, 0xd4, 0x83, 0xbd, 0x60, 0xad, 0x24, + 0xe2, 0x73, 0xf7, 0x25, 0xc5, 0x3f, 0xc3, 0x4e, 0x6e, 0xb6, 0xe1, 0x80, 0xfe, 0x08, 0x1b, 0x69, + 0xce, 0xb4, 0x34, 0x22, 0xed, 0xf0, 0x86, 
0xa4, 0x69, 0x94, 0x85, 0xec, 0x6c, 0xc3, 0x04, 0x6e, + 0xc1, 0xfd, 0x9b, 0x60, 0xd1, 0x1d, 0x28, 0x65, 0xc9, 0x4a, 0x04, 0xd8, 0x86, 0xd5, 0x10, 0x43, + 0x23, 0x9e, 0xcf, 0x66, 0x34, 0xfb, 0xde, 0xd5, 0xbc, 0x33, 0xf0, 0xbc, 0x0e, 0x9f, 0x40, 0xe5, + 0x3a, 0xf3, 0xb7, 0x1f, 0xe3, 0x1e, 0xac, 0x25, 0x3e, 0xc2, 0x1a, 0xcf, 0x67, 0x71, 0x8d, 0x67, + 0xe5, 0x3c, 0x83, 0xe0, 0x5f, 0x0b, 0xb0, 0x7d, 0x2d, 0x02, 0x7d, 0x0a, 0xb7, 0x28, 0x1b, 0xcd, + 0xf8, 0x08, 0xb2, 0x88, 0xda, 0x31, 0x1b, 0x89, 0x63, 0x26, 0x83, 0xb1, 0xa5, 0x90, 0xe8, 0x7d, + 0x58, 0x1c, 0xf0, 0x21, 0x93, 0x7a, 0x1c, 0xcb, 0x8d, 0x95, 0xd8, 0xc7, 0xb3, 0xa9, 0xd8, 0x0a, + 0xb5, 0xe8, 0x30, 0xd9, 0x74, 0x05, 0x65, 0xb8, 0x91, 0xd9, 0x74, 0xe7, 0x3e, 0xb5, 0xe3, 0x6d, + 0x87, 0x5e, 0x40, 0x99, 0x30, 0xc6, 0x25, 0x89, 0xb6, 0xee, 0x14, 0xf2, 0xf0, 0x06, 0xf9, 0x35, + 0x13, 0x94, 0x4e, 0x33, 0xed, 0xc7, 0x7c, 0x0c, 0xa5, 0xb8, 0x00, 0xb4, 0x06, 0x85, 0x4b, 0x3a, + 0x0e, 0x77, 0xde, 0xf4, 0x27, 0xda, 0x84, 0x85, 0x11, 0xf1, 0x86, 0x34, 0xdc, 0x79, 0xfa, 0xf1, + 0xf1, 0xfc, 0x13, 0xc3, 0xfc, 0x04, 0xd6, 0xb2, 0x9e, 0xdf, 0x06, 0x8f, 0xfb, 0xb0, 0xa0, 0xf8, + 0x40, 0xef, 0xc1, 0x4a, 0xd2, 0x64, 0x9f, 0xc8, 0x7e, 0x88, 0xbf, 0x1d, 0x4b, 0xcf, 0x88, 0xec, + 0xa3, 0x1d, 0x28, 0xf5, 0xb9, 0x90, 0xda, 0x22, 0xbc, 0x59, 0x53, 0x41, 0xa4, 0x0c, 0x28, 0x71, + 0x3a, 0x9c, 0x79, 0x7a, 0x09, 0x17, 0xad, 0xe2, 0x54, 0xf0, 0x9c, 0x79, 0x63, 0x1c, 0x00, 0x24, + 0x84, 0xfe, 0x2f, 0xe1, 0x76, 0xa1, 0xec, 0xd3, 0x60, 0xe0, 0x0a, 0xa1, 0x7a, 0xa1, 0x0f, 0x64, + 0x5a, 0xd4, 0xf8, 0x1c, 0x96, 0xf5, 0x35, 0x0e, 0x14, 0x3f, 0xe8, 0x11, 0x14, 0xa3, 0xeb, 0x8c, + 0x2a, 0x71, 0xd3, 0x32, 0x07, 0xdb, 0x4c, 0x46, 0x45, 0x1f, 0xc9, 0xb9, 0xc6, 0x6f, 0x05, 0x58, + 0x4e, 0x1f, 0x54, 0xf4, 0x25, 0x6c, 0x7d, 0x41, 0x65, 0xde, 0x9f, 0x8f, 0x0c, 0xd8, 0x9c, 0x79, + 0x91, 0xf1, 0x1c, 0x6a, 0xc2, 0x72, 0xfa, 0x02, 0x5f, 0xc1, 0xbf, 0x1b, 0xbf, 0xf3, 0x0e, 0x35, + 0x9e, 0xfb, 0xd0, 0x40, 0x54, 0x25, 0x93, 0xb3, 0xa5, 0xd0, 0x5e, 0x0c, 0xbe, 0x7e, 0xf3, 0x9b, + 0xf7, 0x67, 0x1b, 0x45, 0x81, 0x50, 0x13, 0x8a, 0xd1, 0x54, 0xa7, 0xc8, 0xcb, 0x6c, 0x1c, 0x73, + 0x3b, 0x47, 0x13, 0xbb, 0xf8, 0x01, 0xd6, 0xaf, 0x1c, 0x49, 0x74, 0x2f, 0x1d, 0x3f, 0xf7, 0x1a, + 0x9b, 0x78, 0x96, 0x49, 0xe4, 0xfd, 0xe9, 0x57, 0xaf, 0xdf, 0x54, 0x8d, 0xbf, 0xde, 0x54, 0xe7, + 0x7e, 0x99, 0x54, 0x8d, 0xd7, 0x93, 0xaa, 0xf1, 0xe7, 0xa4, 0x6a, 0xfc, 0x33, 0xa9, 0x1a, 0xaf, + 0xfe, 0xad, 0xce, 0x7d, 0x77, 0x70, 0xf9, 0x44, 0xd4, 0x5c, 0x5e, 0xbf, 0x1c, 0x76, 0xa9, 0x47, + 0x65, 0xdd, 0xbf, 0xec, 0xd5, 0x89, 0xef, 0x8a, 0xba, 0xfe, 0xb4, 0x7d, 0xd5, 0x97, 0x7a, 0x18, + 0xa7, 0xbb, 0xa8, 0xfe, 0x62, 0x3e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xf2, 0x09, 0x79, + 0xa7, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto index d22da05b2a03..f44e7cebd2d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto @@ -1,7 +1,8 @@ -// To regenerate api.pb.go run hack/update-generated-device-plugin.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1beta1; +option go_package = "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.pb.go index bd50df30554c..de08bd6933ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.pb.go @@ -17,7 +17,7 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -package dra +package v1alpha1 import ( context "context" @@ -294,29 +294,31 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 348 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x52, 0x31, 0x4f, 0x32, 0x41, - 0x10, 0x65, 0x3f, 0xc8, 0x17, 0x6e, 0x4c, 0x34, 0x59, 0x63, 0x72, 0x39, 0xe0, 0xc0, 0x8b, 0x0a, - 0x8d, 0x47, 0xd0, 0xde, 0xc2, 0x58, 0x58, 0x11, 0x73, 0x09, 0x8d, 0x0d, 0x59, 0x76, 0x47, 0x58, - 0xc3, 0xdd, 0xae, 0x77, 0x1c, 0xb5, 0x3f, 0xc1, 0xd6, 0xca, 0xbf, 0x43, 0x69, 0x49, 0x29, 0xe7, - 0x1f, 0x31, 0xec, 0x49, 0x8c, 0x0a, 0xa1, 0xb3, 0xdb, 0x79, 0xf3, 0x66, 0xde, 0x9b, 0x99, 0x05, - 0x8b, 0x69, 0xe9, 0xeb, 0x58, 0x4d, 0x14, 0x2d, 0x4f, 0x3b, 0x6c, 0xac, 0x47, 0xac, 0xe3, 0x9c, - 0x0e, 0xe5, 0x64, 0x94, 0x0e, 0x7c, 0xae, 0xc2, 0xf6, 0x50, 0x0d, 0x55, 0xdb, 0x10, 0x06, 0xe9, - 0x9d, 0x89, 0x4c, 0x60, 0x5e, 0x79, 0xa1, 0xf7, 0x42, 0xc0, 0xe9, 0x2a, 0x81, 0x37, 0x31, 0x6a, - 0x16, 0x63, 0x80, 0x89, 0x4a, 0x63, 0x8e, 0x01, 0x3e, 0xa4, 0x98, 0x4c, 0x68, 0x15, 0xac, 0x88, - 0x85, 0x98, 0x68, 0xc6, 0xd1, 0x26, 0x0d, 0xd2, 0xb2, 0x82, 0x2f, 0x80, 0x56, 0xc0, 0xe2, 0x63, - 0x26, 0xc3, 0x7e, 0x2a, 0x85, 0xfd, 0xcf, 0x64, 0xcb, 0x06, 0xe8, 0x49, 0x41, 0x6b, 0x00, 0x79, - 0x72, 0xc9, 0xb7, 0x8b, 0x79, 0xad, 0x41, 0xba, 0x2c, 0x44, 0xda, 0x84, 0xbd, 0xf8, 0x53, 0xac, - 0x3f, 0x62, 0x91, 0x18, 0xa3, 0x5d, 0x32, 0x9c, 0xdd, 0x15, 0x7c, 0x6d, 0x50, 0xef, 0x02, 0x2a, - 0x6b, 0x0d, 0x26, 0x5a, 0x45, 0x09, 0xd2, 0x3a, 0xec, 0x70, 0x21, 0xfb, 0x02, 0xa7, 0x92, 0x63, - 0x62, 0x93, 0x46, 0xb1, 0x65, 0x05, 0xc0, 0x85, 0xbc, 0xca, 0x11, 0xef, 0x99, 0x40, 0x75, 0xd9, - 0xa0, 0x17, 0xe9, 0xbf, 0x9e, 0xf1, 0x87, 0xb7, 0xd2, 0x2f, 0x6f, 0x75, 0xa8, 0x6d, 0xb0, 0x96, - 0x4f, 0x77, 0x36, 0x27, 0x50, 0x5a, 0x32, 0xa8, 0x80, 0xfd, 0x35, 0x5b, 0xa0, 0x47, 0xfe, 0xea, - 0xf0, 0xfe, 0xe6, 0x2b, 0x3a, 0xc7, 0x5b, 0x58, 0xb9, 0x98, 0x57, 0xa0, 0xf7, 0x70, 0xb0, 0xd6, - 0x0f, 0x3d, 0xf9, 0xde, 0x61, 0xd3, 0x2e, 0x9d, 0xe6, 0x56, 0xde, 0x4a, 0xeb, 0xf2, 0x70, 0xb6, - 0x70, 0xc9, 0x7c, 0xe1, 0x16, 0x1e, 0x33, 0x97, 0xcc, 0x32, 0x97, 0xbc, 0x66, 0x2e, 0x79, 0xcb, - 0x5c, 0xf2, 0xf4, 0xee, 0x16, 0x6e, 0x8b, 0x22, 0x66, 0x83, 0xff, 0xe6, 0x8f, 0x9e, 0x7f, 0x04, - 0x00, 0x00, 0xff, 0xff, 
0xab, 0xb6, 0x01, 0xe3, 0xe9, 0x02, 0x00, 0x00, + // 374 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x52, 0x31, 0x6f, 0xda, 0x40, + 0x14, 0xe6, 0x0a, 0xaa, 0xf0, 0x55, 0x6a, 0xa5, 0xab, 0x2a, 0x59, 0x06, 0x0c, 0xb2, 0x68, 0x61, + 0xa9, 0x2d, 0xda, 0xa5, 0x53, 0x07, 0xd4, 0xa1, 0x13, 0xaa, 0x2c, 0xb1, 0x74, 0x41, 0x67, 0xdf, + 0x8b, 0xb9, 0x60, 0xfb, 0x2e, 0x3e, 0x9b, 0x39, 0x3f, 0x21, 0x6b, 0xa6, 0xfc, 0x1d, 0xc6, 0x8c, + 0x8c, 0xc1, 0xf9, 0x23, 0x11, 0xe7, 0x58, 0x51, 0x12, 0x10, 0x5b, 0xb6, 0x7b, 0xdf, 0xfb, 0xde, + 0xfb, 0xbe, 0xf7, 0xde, 0x61, 0x83, 0x4a, 0xee, 0xca, 0x4c, 0xe4, 0x82, 0xb4, 0xd7, 0x13, 0x1a, + 0xcb, 0x25, 0x9d, 0x58, 0xdf, 0x23, 0x9e, 0x2f, 0x8b, 0xc0, 0x0d, 0x45, 0xe2, 0x45, 0x22, 0x12, + 0x9e, 0x26, 0x04, 0xc5, 0x99, 0x8e, 0x74, 0xa0, 0x5f, 0x55, 0xa1, 0x73, 0x83, 0xb0, 0x35, 0x13, + 0x0c, 0xfe, 0x65, 0x20, 0x69, 0x06, 0x3e, 0x28, 0x51, 0x64, 0x21, 0xf8, 0x70, 0x51, 0x80, 0xca, + 0x49, 0x17, 0x1b, 0x29, 0x4d, 0x40, 0x49, 0x1a, 0x82, 0x89, 0x06, 0x68, 0x6c, 0xf8, 0x4f, 0x00, + 0xe9, 0x60, 0x23, 0x8c, 0x29, 0x4f, 0x16, 0x05, 0x67, 0xe6, 0x3b, 0x9d, 0x6d, 0x6b, 0x60, 0xce, + 0x19, 0xe9, 0x61, 0x5c, 0x25, 0xf7, 0x7c, 0xb3, 0x59, 0xd5, 0x6a, 0x64, 0x46, 0x13, 0x20, 0x23, + 0xfc, 0x29, 0x7b, 0x14, 0x5b, 0x2c, 0x69, 0xca, 0x62, 0x30, 0x5b, 0x9a, 0xf3, 0xb1, 0x86, 0xff, + 0x6a, 0xd4, 0xf9, 0x8d, 0x3b, 0x07, 0x0d, 0x2a, 0x29, 0x52, 0x05, 0xa4, 0x8f, 0x3f, 0x84, 0x8c, + 0x2f, 0x18, 0xac, 0x79, 0x08, 0xca, 0x44, 0x83, 0xe6, 0xd8, 0xf0, 0x71, 0xc8, 0xf8, 0x9f, 0x0a, + 0x71, 0xae, 0x11, 0xee, 0xee, 0x1b, 0xcc, 0x53, 0xf9, 0xd6, 0x33, 0xbe, 0xf0, 0xd6, 0x7a, 0xe5, + 0xad, 0x8f, 0x7b, 0x47, 0xac, 0x55, 0xd3, 0xfd, 0xd8, 0x22, 0xdc, 0xda, 0x33, 0x08, 0xc3, 0x9f, + 0x0f, 0x6c, 0x81, 0x0c, 0xdd, 0xfa, 0xf0, 0xee, 0xf1, 0x2b, 0x5a, 0x5f, 0x4f, 0xb0, 0x2a, 0x31, + 0xa7, 0x41, 0xce, 0xf1, 0x97, 0x83, 0x7e, 0xc8, 0xb7, 0xe7, 0x1d, 0x8e, 0xed, 0xd2, 0x1a, 0x9d, + 0xe4, 0xd5, 0x5a, 0xd3, 0xe9, 0x66, 0x67, 0xa3, 0xed, 0xce, 0x6e, 0x5c, 0x96, 0x36, 0xda, 0x94, + 0x36, 0xba, 0x2d, 0x6d, 0x74, 0x57, 0xda, 0xe8, 0xea, 0xde, 0x6e, 0xfc, 0x1f, 0xae, 0x7e, 0x29, + 0x97, 0x0b, 0x6f, 0x55, 0x04, 0x10, 0x43, 0xee, 0xc9, 0x55, 0xe4, 0x51, 0xc9, 0x95, 0xc7, 0x32, + 0xea, 0xd5, 0x1a, 0xc1, 0x7b, 0xfd, 0x89, 0x7f, 0x3e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf6, 0xcb, + 0x4e, 0x59, 0x0a, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.proto index 2ba5c5c34d46..3132b6b5f67b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha1/api.proto @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// To regenerate api.pb.go run hack/update-generated-kubelet-plugin-registration.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1alpha1; +option go_package = "k8s.io/kubelet/pkg/apis/dra/v1alpha1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -option go_package = "dra"; option (gogoproto.goproto_stringer_all) = false; option (gogoproto.stringer_all) = true; option (gogoproto.goproto_getters_all) = true; @@ -78,4 +78,4 @@ message NodeUnprepareResourceRequest { message NodeUnprepareResourceResponse { // Intentionally empty. 
-} \ No newline at end of file +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go index a1aa7469e4af..ecfc84317696 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go @@ -17,37 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -/* -Package pluginregistration is a generated protocol buffer package. - -It is generated from these files: - - api.proto - -It has these top-level messages: - - PluginInfo - RegistrationStatus - RegistrationStatusResponse - InfoRequest -*/ -package pluginregistration - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" +package v1 import ( - context "golang.org/x/net/context" + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" ) -import strings "strings" -import reflect "reflect" - -import io "io" - // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf @@ -57,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration type PluginInfo struct { @@ -80,12 +66,42 @@ type PluginInfo struct { // The Kubelet component communicating with the plugin should be able // to choose any preferred version from this list, or returns an error // if none of the listed versions is supported. 
- SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` + SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions,proto3" json:"supported_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginInfo) Reset() { *m = PluginInfo{} } +func (*PluginInfo) ProtoMessage() {} +func (*PluginInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} +func (m *PluginInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInfo.Merge(m, src) +} +func (m *PluginInfo) XXX_Size() int { + return m.Size() +} +func (m *PluginInfo) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInfo.DiscardUnknown(m) } -func (m *PluginInfo) Reset() { *m = PluginInfo{} } -func (*PluginInfo) ProtoMessage() {} -func (*PluginInfo) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } +var xxx_messageInfo_PluginInfo proto.InternalMessageInfo func (m *PluginInfo) GetType() string { if m != nil { @@ -120,12 +136,42 @@ type RegistrationStatus struct { // True if plugin gets registered successfully at Kubelet PluginRegistered bool `protobuf:"varint,1,opt,name=plugin_registered,json=pluginRegistered,proto3" json:"plugin_registered,omitempty"` // Error message in case plugin fails to register, empty string otherwise - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } -func (*RegistrationStatus) ProtoMessage() {} -func (*RegistrationStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } +func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } +func (*RegistrationStatus) ProtoMessage() {} +func (*RegistrationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{1} +} +func (m *RegistrationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegistrationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegistrationStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RegistrationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegistrationStatus.Merge(m, src) +} +func (m *RegistrationStatus) XXX_Size() int { + return m.Size() +} +func (m *RegistrationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RegistrationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RegistrationStatus proto.InternalMessageInfo func (m *RegistrationStatus) GetPluginRegistered() bool { if m != nil { @@ -143,19 +189,79 @@ func (m *RegistrationStatus) GetError() string { // RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC type RegistrationStatusResponse struct { + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } +func (*RegistrationStatusResponse) ProtoMessage() {} +func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{2} +} +func (m *RegistrationStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegistrationStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegistrationStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RegistrationStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegistrationStatusResponse.Merge(m, src) +} +func (m *RegistrationStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *RegistrationStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegistrationStatusResponse.DiscardUnknown(m) } -func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } -func (*RegistrationStatusResponse) ProtoMessage() {} -func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } +var xxx_messageInfo_RegistrationStatusResponse proto.InternalMessageInfo // InfoRequest is the empty request message from Kubelet type InfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{3} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) } -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo func init() { proto.RegisterType((*PluginInfo)(nil), "pluginregistration.PluginInfo") @@ -164,6 +270,35 @@ func init() { proto.RegisterType((*InfoRequest)(nil), "pluginregistration.InfoRequest") } +func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } + +var fileDescriptor_00212fb1f9d3bf1c = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0xcd, 0xbc, 0xf6, 0xbd, 0xd7, 0x8e, 0x0a, 0x76, 0x70, 0x11, 0x82, 0x8c, 0x25, 0x0b, 0x29, + 0x48, 0x13, 0xd4, 0x8d, 0x6b, 0x37, 0x22, 0x8a, 0x48, 0x04, 0x05, 0x37, 0x25, 0xb1, 0xb7, 0x71, + 0x68, 0x3b, 0x33, 0xce, 0x4c, 0x0a, 0x5d, 0xe9, 0x27, 0xf8, 0x59, 0x5d, 0x8a, 0x2b, 0x97, 0x36, + 0xfe, 0x88, 0x74, 0x52, 0x62, 0x21, 0x5d, 0xb8, 0xbb, 0xe7, 0xdc, 0x73, 0xef, 0xdc, 0x73, 0x18, + 0xdc, 0x8c, 0x25, 0x0b, 0xa4, 0x12, 0x46, 
0x10, 0x22, 0x47, 0x59, 0xca, 0xb8, 0x82, 0x94, 0x69, + 0xa3, 0x62, 0xc3, 0x04, 0xf7, 0xba, 0x29, 0x33, 0x8f, 0x59, 0x12, 0x3c, 0x88, 0x71, 0x98, 0x8a, + 0x54, 0x84, 0x56, 0x9a, 0x64, 0x03, 0x8b, 0x2c, 0xb0, 0x55, 0xb1, 0xc2, 0x7f, 0xc6, 0xf8, 0xda, + 0x2e, 0x39, 0xe7, 0x03, 0x41, 0x08, 0xae, 0x9b, 0xa9, 0x04, 0x17, 0xb5, 0x51, 0xa7, 0x19, 0xd9, + 0x7a, 0xc1, 0xf1, 0x78, 0x0c, 0xee, 0x9f, 0x82, 0x5b, 0xd4, 0xc4, 0xc3, 0x0d, 0xe0, 0x7d, 0x29, + 0x18, 0x37, 0x6e, 0xcd, 0xf2, 0x25, 0x26, 0x5d, 0x4c, 0x74, 0x26, 0xa5, 0x50, 0x06, 0xfa, 0xbd, + 0x09, 0x28, 0xcd, 0x04, 0xd7, 0x6e, 0xbd, 0x5d, 0xeb, 0x34, 0xa3, 0x56, 0xd9, 0xb9, 0x5d, 0x36, + 0xfc, 0x3b, 0x4c, 0xa2, 0x95, 0xfb, 0x6f, 0x4c, 0x6c, 0x32, 0x4d, 0x0e, 0x70, 0xab, 0xf0, 0xd6, + 0x2b, 0xcc, 0x81, 0x82, 0xbe, 0xbd, 0xaa, 0x11, 0x6d, 0x17, 0x8d, 0xa8, 0xe4, 0xc9, 0x0e, 0xfe, + 0x0b, 0x4a, 0x09, 0xb5, 0x3c, 0xb1, 0x00, 0xfe, 0x2e, 0xf6, 0xaa, 0x8b, 0x23, 0xd0, 0x52, 0x70, + 0x0d, 0xfe, 0x16, 0xde, 0x58, 0x38, 0x8e, 0xe0, 0x29, 0x03, 0x6d, 0x8e, 0xde, 0x11, 0xde, 0x5c, + 0x55, 0x93, 0x4b, 0xfc, 0xff, 0x0c, 0x8c, 0x0d, 0x65, 0x2f, 0xa8, 0xc6, 0x1c, 0xac, 0x0c, 0x7b, + 0x74, 0x9d, 0xe0, 0x27, 0x55, 0xdf, 0x21, 0x06, 0xbb, 0x57, 0xc2, 0xb0, 0xc1, 0x74, 0x8d, 0xd5, + 0xfd, 0x75, 0xd3, 0x55, 0x9d, 0x17, 0xfc, 0x4e, 0x57, 0x3a, 0x74, 0x4e, 0x2f, 0x66, 0x73, 0x8a, + 0x3e, 0xe6, 0xd4, 0x79, 0xc9, 0x29, 0x9a, 0xe5, 0x14, 0xbd, 0xe5, 0x14, 0x7d, 0xe6, 0x14, 0xbd, + 0x7e, 0x51, 0xe7, 0xbe, 0x3b, 0x3c, 0xd1, 0x01, 0x13, 0xe1, 0x30, 0x4b, 0x60, 0x04, 0x26, 0x94, + 0xc3, 0x34, 0x8c, 0x25, 0xd3, 0x61, 0xf5, 0x99, 0x70, 0x72, 0x98, 0xfc, 0xb3, 0xff, 0xe5, 0xf8, + 0x3b, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x5f, 0xd4, 0xb2, 0x7f, 0x02, 0x00, 0x00, +} + // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -172,8 +307,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for Registration service - +// RegistrationClient is the client API for Registration service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RegistrationClient interface { GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) @@ -189,7 +325,7 @@ func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) { out := new(PluginInfo) - err := grpc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, opts...) if err != nil { return nil, err } @@ -198,20 +334,30 @@ func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts func (c *registrationClient) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { out := new(RegistrationStatusResponse) - err := grpc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -// Server API for Registration service - +// RegistrationServer is the server API for Registration service. type RegistrationServer interface { GetInfo(context.Context, *InfoRequest) (*PluginInfo, error) NotifyRegistrationStatus(context.Context, *RegistrationStatus) (*RegistrationStatusResponse, error) } +// UnimplementedRegistrationServer can be embedded to have forward compatible implementations. +type UnimplementedRegistrationServer struct { +} + +func (*UnimplementedRegistrationServer) GetInfo(ctx context.Context, req *InfoRequest) (*PluginInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetInfo not implemented") +} +func (*UnimplementedRegistrationServer) NotifyRegistrationStatus(ctx context.Context, req *RegistrationStatus) (*RegistrationStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NotifyRegistrationStatus not implemented") +} + func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { s.RegisterService(&_Registration_serviceDesc, srv) } @@ -272,7 +418,7 @@ var _Registration_serviceDesc = grpc.ServiceDesc{ func (m *PluginInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -280,50 +426,52 @@ func (m *PluginInfo) Marshal() (dAtA []byte, err error) { } func (m *PluginInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Type) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if len(m.SupportedVersions) > 0 { + for iNdEx := len(m.SupportedVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SupportedVersions[iNdEx]) + copy(dAtA[i:], m.SupportedVersions[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.SupportedVersions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } if len(m.Endpoint) > 0 { - dAtA[i] = 0x1a - i++ + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) - i += copy(dAtA[i:], m.Endpoint) + i-- + dAtA[i] = 0x1a } - if len(m.SupportedVersions) > 0 { - for _, s := range m.SupportedVersions { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -331,33 +479,39 @@ func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { } func (m *RegistrationStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RegistrationStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } if m.PluginRegistered { - dAtA[i] = 0x8 - i++ + i-- if m.PluginRegistered { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if len(m.Error) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -365,17 +519,22 @@ func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { } func (m *RegistrationStatusResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RegistrationStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *InfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -383,23 +542,33 @@ func (m *InfoRequest) Marshal() (dAtA []byte, err error) { } func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PluginInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -424,6 +593,9 @@ func (m *PluginInfo) Size() (n int) { } func (m *RegistrationStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PluginRegistered { @@ -437,26 +609,25 @@ func (m *RegistrationStatus) Size() (n int) { } func (m *RegistrationStatusResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func sovApi(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozApi(x uint64) (n int) { return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -526,7 +697,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -554,7 +725,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -564,6 +735,9 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthApi } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -583,7 +757,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -593,6 +767,9 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthApi } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -612,7 +789,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -622,6 +799,9 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthApi } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -641,7 +821,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -651,6 +831,9 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthApi } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -662,7 +845,7 @@ func (m *PluginInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -692,7 +875,7 @@ func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -720,7 +903,7 @@ func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -740,7 +923,7 @@ func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -750,6 +933,9 @@ func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthApi } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -761,7 +947,7 @@ func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -791,7 +977,7 @@ func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -811,7 +997,7 @@ func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -841,7 +1027,7 @@ func (m *InfoRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -861,7 +1047,7 @@ func (m *InfoRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -879,6 +1065,7 @@ func (m *InfoRequest) Unmarshal(dAtA []byte) error { func skipApi(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire 
uint64 for shift := uint(0); ; shift += 7 { @@ -910,10 +1097,8 @@ func skipApi(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -930,81 +1115,34 @@ func skipApi(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthApi } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipApi(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") ) - -func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } - -var fileDescriptorApi = []byte{ - // 337 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x41, 0x4b, 0x33, 0x31, - 0x14, 0xdc, 0x7c, 0xed, 0xa7, 0xed, 0x53, 0xc1, 0x06, 0x0f, 0xcb, 0x52, 0x62, 0xd9, 0x83, 0x14, - 0xa4, 0x5b, 0xd0, 0x7f, 0xe0, 0x45, 0x04, 0x11, 0x89, 0xa0, 0xc7, 0xb2, 0xb5, 0xaf, 0x6b, 0xc0, - 0x26, 0x31, 0xc9, 0x0a, 0x3d, 0xe9, 0x4f, 0xf0, 0x67, 0xf5, 0x28, 0x9e, 0x3c, 0xda, 0xf5, 0x8f, - 0x48, 0xb3, 0x65, 0x2d, 0xb4, 0x07, 0x6f, 0x6f, 0xe6, 0x4d, 0x1e, 0x33, 0x43, 0xa0, 0x99, 0x6a, - 0x91, 0x68, 0xa3, 0x9c, 0xa2, 0x54, 0x3f, 0xe6, 0x99, 0x90, 0x06, 0x33, 0x61, 0x9d, 0x49, 0x9d, - 0x50, 0x32, 0xea, 0x65, 0xc2, 0x3d, 0xe4, 0xc3, 0xe4, 0x5e, 0x4d, 0xfa, 0x99, 0xca, 0x54, 0xdf, - 0x4b, 0x87, 0xf9, 0xd8, 0x23, 0x0f, 0xfc, 0x54, 0x9e, 0x88, 0x5f, 0x00, 0xae, 0xfd, 0x91, 0x0b, - 0x39, 0x56, 0x94, 0x42, 0xdd, 0x4d, 0x35, 0x86, 0xa4, 0x43, 0xba, 0x4d, 0xee, 0xe7, 0x05, 0x27, - 0xd3, 0x09, 0x86, 0xff, 0x4a, 0x6e, 0x31, 0xd3, 0x08, 0x1a, 0x28, 0x47, 0x5a, 0x09, 0xe9, 0xc2, - 0x9a, 0xe7, 0x2b, 0x4c, 0x7b, 0x40, 0x6d, 0xae, 0xb5, 0x32, 0x0e, 0x47, 0x83, 0x67, 0x34, 0x56, - 0x28, 0x69, 0xc3, 0x7a, 0xa7, 0xd6, 0x6d, 0xf2, 0x56, 0xb5, 0xb9, 0x5d, 0x2e, 0xe2, 0x3b, 0xa0, - 0x7c, 0xc5, 0xff, 0x8d, 0x4b, 0x5d, 0x6e, 0xe9, 0x31, 0xb4, 0xca, 0x6c, 0x83, 0x32, 0x1c, 0x1a, - 0x1c, 0x79, 0x57, 0x0d, 0xbe, 0x5f, 0x2e, 0x78, 0xc5, 0xd3, 0x03, 0xf8, 0x8f, 0xc6, 0x28, 0xb3, - 0xb4, 0x58, 0x82, 0xb8, 0x0d, 0xd1, 0xfa, 0x61, 0x8e, 0x56, 0x2b, 0x69, 0x31, 0xde, 0x83, 0x9d, - 0x45, 0x62, 0x8e, 0x4f, 0x39, 0x5a, 0x77, 0xf2, 0x41, 0x60, 0x77, 0x55, 0x4d, 0x2f, 0x61, 0xfb, - 0x1c, 0x9d, 0x2f, 0xe5, 0x30, 0x59, 0xaf, 0x39, 0x59, 0x79, 0x1c, 0xb1, 0x4d, 0x82, 0xdf, 0x56, - 0xe3, 0x80, 0x3a, 0x08, 0xaf, 0x94, 0x13, 
0xe3, 0xe9, 0x86, 0xa8, 0x47, 0x9b, 0x5e, 0xaf, 0xeb, - 0xa2, 0xe4, 0x6f, 0xba, 0x2a, 0x61, 0x70, 0xd6, 0x9e, 0xcd, 0x19, 0xf9, 0x9c, 0xb3, 0xe0, 0xb5, - 0x60, 0x64, 0x56, 0x30, 0xf2, 0x5e, 0x30, 0xf2, 0x55, 0x30, 0xf2, 0xf6, 0xcd, 0x82, 0xe1, 0x96, - 0xff, 0x00, 0xa7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe4, 0xc0, 0xe3, 0x42, 0x50, 0x02, 0x00, - 0x00, -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto index 5c4f6f801681..8972e7e82c0e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto @@ -1,7 +1,8 @@ -// To regenerate api.pb.go run hack/update-generated-kubelet-plugin-registration.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; -package pluginregistration; +package pluginregistration; // This should have been v1. +option go_package = "k8s.io/kubelet/pkg/apis/pluginregistration/v1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go index 5efe743d368b..475c0404b9fe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package pluginregistration +package v1 const ( // CSIPlugin identifier for registered CSI plugins diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go index ac0924b2b166..e50e2e2bb022 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go @@ -592,41 +592,43 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 539 bytes of a gzipped FileDescriptorProto + // 571 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xed, 0xda, 0x21, 0x6d, 0xa6, 0x29, 0x54, 0x0b, 0x22, 0x26, 0x4d, 0xdd, 0xc8, 0x5c, 0x22, - 0x01, 0xae, 0x1a, 0x04, 0xf7, 0xd2, 0x48, 0x28, 0x12, 0x8d, 0x60, 0x55, 0xae, 0x44, 0x8e, 0xbd, - 0x0d, 0x96, 0x12, 0xef, 0xe2, 0x5d, 0x47, 0x84, 0x13, 0x07, 0x3e, 0x80, 0x03, 0x67, 0xfe, 0x83, - 0x3f, 0xe8, 0x91, 0x23, 0x47, 0x1a, 0x7e, 0x04, 0xed, 0xda, 0x4e, 0x9d, 0x26, 0x01, 0xf5, 0xe4, - 0xd9, 0x79, 0x33, 0xb3, 0x6f, 0xe6, 0x8d, 0x17, 0x2a, 0x1e, 0x0f, 0x5d, 0x1e, 0x33, 0xc9, 0xb0, - 0x31, 0x39, 0xaa, 0x3f, 0x19, 0x86, 0xf2, 0x7d, 0x32, 0x70, 0x7d, 0x36, 0x3e, 0x1c, 0xb2, 0x21, - 0x3b, 0xd4, 0xd0, 0x20, 0x39, 0xd7, 0x27, 0x7d, 0xd0, 0x56, 0x9a, 0xe2, 0xec, 0xc3, 0xde, 0xf1, - 0x68, 0xc4, 0x7c, 0x4f, 0x7a, 0x83, 0x11, 0x25, 0x54, 0xb0, 0x24, 0xf6, 0xa9, 0x20, 0xf4, 0x43, - 0x42, 0x85, 0x74, 0xbe, 0x21, 0x68, 0xac, 0xc6, 0x05, 0x67, 0x91, 0xa0, 0xd8, 0x85, 0xcd, 0x80, - 0x4e, 0x42, 0x9f, 0x0a, 0x0b, 0x35, 0xcd, 0xd6, 0x76, 0xfb, 0x9e, 0x3b, 0x39, 0x72, 0x4f, 0x58, - 0x24, 0xbd, 0x30, 0xa2, 0x71, 0x27, 0xc5, 0x48, 0x1e, 
0x84, 0x6b, 0xb0, 0xe9, 0xf3, 0xa4, 0x1f, - 0x06, 0xc2, 0x32, 0x9a, 0x66, 0xcb, 0x24, 0x65, 0x9f, 0x27, 0xdd, 0x40, 0xe0, 0x47, 0x50, 0x1e, - 0xd3, 0x31, 0x8b, 0xa7, 0x96, 0xa9, 0xeb, 0xdc, 0x5d, 0xa8, 0x73, 0xaa, 0x21, 0x92, 0x85, 0x38, - 0x0f, 0xa0, 0xf6, 0x2a, 0x14, 0xf2, 0x35, 0x0b, 0x96, 0x18, 0xbf, 0x01, 0x6b, 0x19, 0xca, 0xc8, - 0x3e, 0x83, 0x1d, 0xce, 0x82, 0x7e, 0x9c, 0x03, 0x19, 0xe5, 0x5d, 0x75, 0xd5, 0x42, 0x42, 0x95, - 0x17, 0x4e, 0xce, 0x47, 0xa8, 0x16, 0x51, 0x8c, 0xa1, 0x14, 0x79, 0x63, 0x6a, 0xa1, 0x26, 0x6a, - 0x55, 0x88, 0xb6, 0x71, 0x03, 0x2a, 0xea, 0x2b, 0xb8, 0xe7, 0x53, 0xcb, 0xd0, 0xc0, 0x95, 0x03, - 0x3f, 0x07, 0xf0, 0xf3, 0x56, 0x44, 0xd6, 0xe0, 0xfd, 0x85, 0x06, 0xaf, 0xee, 0x2e, 0x44, 0x3a, - 0xdf, 0x11, 0xe0, 0xe5, 0x90, 0x95, 0x04, 0x0a, 0x42, 0x18, 0x37, 0x14, 0xc2, 0x5c, 0x23, 0x44, - 0xe9, 0xff, 0x42, 0x48, 0xb8, 0x73, 0x0d, 0xc2, 0x07, 0xb0, 0x9d, 0x82, 0x7d, 0x39, 0xe5, 0x39, - 0x47, 0x48, 0x5d, 0x67, 0x53, 0x4e, 0x15, 0x7b, 0x11, 0x7e, 0x4a, 0xa7, 0x54, 0x22, 0xda, 0xc6, - 0x8f, 0x61, 0x4b, 0x32, 0xce, 0x46, 0x6c, 0xa8, 0xf4, 0x47, 0xb9, 0x28, 0x67, 0x99, 0xaf, 0x1b, - 0x9d, 0x33, 0x32, 0x8f, 0x70, 0xbe, 0x20, 0xd8, 0xbd, 0xde, 0x19, 0x7e, 0x08, 0x3b, 0xb9, 0xb0, - 0xfd, 0xc2, 0x74, 0xaa, 0xb9, 0xb3, 0xa7, 0xa6, 0xb4, 0x0f, 0x90, 0x0e, 0x60, 0xbe, 0x81, 0x15, - 0x52, 0x49, 0x3d, 0xaa, 0xf7, 0x9b, 0xd1, 0x68, 0x43, 0xb5, 0x88, 0x60, 0x07, 0x6e, 0x45, 0x2c, - 0x98, 0xaf, 0x55, 0x55, 0xa5, 0xf6, 0xde, 0x9e, 0x1e, 0xf7, 0x58, 0x40, 0x49, 0x0a, 0x39, 0x75, - 0xd8, 0xca, 0x5d, 0xf8, 0x36, 0x18, 0xdd, 0x8e, 0xa6, 0x69, 0x12, 0xa3, 0xdb, 0x69, 0xff, 0x40, - 0x80, 0x8b, 0x8b, 0xa6, 0xf6, 0x98, 0xc6, 0xf8, 0x04, 0x4a, 0xca, 0xc2, 0x7b, 0xaa, 0xde, 0x9a, - 0xb5, 0xaf, 0x37, 0x56, 0x83, 0xe9, 0xe2, 0x3b, 0x1b, 0xf8, 0x1d, 0xd4, 0x5e, 0x52, 0xb9, 0xea, - 0x57, 0xc6, 0x07, 0x2a, 0xf5, 0x1f, 0x8f, 0x40, 0xbd, 0xb9, 0x3e, 0x20, 0xaf, 0xff, 0xa2, 0x71, - 0x71, 0x69, 0xa3, 0x5f, 0x97, 0xf6, 0xc6, 0xe7, 0x99, 0x8d, 0x2e, 0x66, 0x36, 0xfa, 0x39, 0xb3, - 0xd1, 0xef, 0x99, 0x8d, 0xbe, 0xfe, 0xb1, 0x37, 0x06, 0x65, 0xfd, 0xd8, 0x3c, 0xfd, 0x1b, 0x00, - 0x00, 0xff, 0xff, 0x43, 0x46, 0x5d, 0x7f, 0xac, 0x04, 0x00, 0x00, + 0x10, 0xed, 0xda, 0xa1, 0x6d, 0xa6, 0x29, 0x54, 0x0b, 0xa2, 0x26, 0x4d, 0xdd, 0xc8, 0x1c, 0x88, + 0x04, 0xd8, 0x4a, 0x10, 0x88, 0x6b, 0x69, 0x10, 0x8a, 0x44, 0x23, 0xb0, 0xca, 0x85, 0x03, 0x91, + 0x63, 0x6f, 0x83, 0x95, 0xc4, 0xbb, 0x78, 0xd7, 0x11, 0xe1, 0xc4, 0x81, 0x0f, 0xe0, 0xc0, 0x99, + 0xff, 0xe0, 0x0f, 0x7a, 0xe4, 0xc8, 0x91, 0x86, 0x1f, 0x41, 0xbb, 0x8e, 0x53, 0xa7, 0x49, 0x40, + 0x3d, 0x79, 0x76, 0xde, 0xcc, 0xf8, 0xcd, 0xbc, 0xd9, 0x85, 0xa2, 0xc7, 0x42, 0x9b, 0xc5, 0x54, + 0x50, 0xac, 0x8d, 0xea, 0xe5, 0x87, 0xbd, 0x50, 0xbc, 0x4f, 0xba, 0xb6, 0x4f, 0x87, 0x4e, 0x8f, + 0xf6, 0xa8, 0xa3, 0xa0, 0x6e, 0x72, 0xaa, 0x4e, 0xea, 0xa0, 0xac, 0x34, 0xc5, 0xda, 0x87, 0xbd, + 0xc3, 0xc1, 0x80, 0xfa, 0x9e, 0xf0, 0xba, 0x03, 0xe2, 0x12, 0x4e, 0x93, 0xd8, 0x27, 0xdc, 0x25, + 0x1f, 0x12, 0xc2, 0x85, 0xf5, 0x0d, 0x41, 0x65, 0x39, 0xce, 0x19, 0x8d, 0x38, 0xc1, 0x36, 0x6c, + 0x04, 0x64, 0x14, 0xfa, 0x84, 0x1b, 0xa8, 0xaa, 0xd7, 0xb6, 0x1a, 0xb7, 0xec, 0x51, 0xdd, 0x3e, + 0xa2, 0x91, 0xf0, 0xc2, 0x88, 0xc4, 0xcd, 0x14, 0x73, 0xb3, 0x20, 0xbc, 0x0b, 0x1b, 0x3e, 0x4b, + 0x3a, 0x61, 0xc0, 0x0d, 0xad, 0xaa, 0xd7, 0x74, 0x77, 0xdd, 0x67, 0x49, 0x2b, 0xe0, 0xf8, 0x3e, + 0xac, 0x0f, 0xc9, 0x90, 0xc6, 0x63, 0x43, 0x57, 0x75, 0x6e, 0xce, 0xd5, 0x39, 0x56, 0x90, 0x3b, + 0x0d, 0xb1, 0xee, 0xc0, 0xee, 0xcb, 0x90, 0x8b, 0x57, 0x34, 0x58, 0x60, 0xfc, 0x1a, 0x8c, 0x45, + 0x68, 0x4a, 
0xf6, 0x31, 0x6c, 0x33, 0x1a, 0x74, 0xe2, 0x0c, 0x98, 0x52, 0xde, 0x91, 0xbf, 0x9a, + 0x4b, 0x28, 0xb1, 0xdc, 0xc9, 0xfa, 0x08, 0xa5, 0x3c, 0x8a, 0x31, 0x14, 0x22, 0x6f, 0x48, 0x0c, + 0x54, 0x45, 0xb5, 0xa2, 0xab, 0x6c, 0x5c, 0x81, 0xa2, 0xfc, 0x72, 0xe6, 0xf9, 0xc4, 0xd0, 0x14, + 0x70, 0xe1, 0xc0, 0x4f, 0x00, 0xfc, 0xac, 0x15, 0x3e, 0x6d, 0xf0, 0xf6, 0x5c, 0x83, 0x17, 0xff, + 0xce, 0x45, 0x5a, 0xdf, 0x11, 0xe0, 0xc5, 0x90, 0xa5, 0x04, 0x72, 0x42, 0x68, 0x57, 0x14, 0x42, + 0x5f, 0x21, 0x44, 0xe1, 0xff, 0x42, 0x08, 0xb8, 0x71, 0x09, 0xc2, 0x07, 0xb0, 0x95, 0x82, 0x1d, + 0x31, 0x66, 0x19, 0x47, 0x48, 0x5d, 0x27, 0x63, 0x46, 0x24, 0x7b, 0x1e, 0x7e, 0x4a, 0xa7, 0x54, + 0x70, 0x95, 0x8d, 0x1f, 0xc0, 0xa6, 0xa0, 0x8c, 0x0e, 0x68, 0x4f, 0xea, 0x8f, 0x32, 0x51, 0x4e, + 0xa6, 0xbe, 0x56, 0x74, 0x4a, 0xdd, 0x59, 0x84, 0xf5, 0x05, 0xc1, 0xce, 0xe5, 0xce, 0xf0, 0x5d, + 0xd8, 0xce, 0x84, 0xed, 0xe4, 0xa6, 0x53, 0xca, 0x9c, 0x6d, 0x39, 0xa5, 0x7d, 0x80, 0x74, 0x00, + 0xb3, 0x0d, 0x2c, 0xba, 0xc5, 0xd4, 0x23, 0x7b, 0xbf, 0x1a, 0x8d, 0x06, 0x94, 0xf2, 0x08, 0xb6, + 0xe0, 0x5a, 0x44, 0x83, 0xd9, 0x5a, 0x95, 0x64, 0x6a, 0xfb, 0xcd, 0xf1, 0x61, 0x9b, 0x06, 0xc4, + 0x4d, 0x21, 0xab, 0x0c, 0x9b, 0x99, 0x0b, 0x5f, 0x07, 0xad, 0xd5, 0x54, 0x34, 0x75, 0x57, 0x6b, + 0x35, 0x1b, 0x3f, 0x10, 0xe0, 0xfc, 0xa2, 0xc9, 0x3d, 0x26, 0x31, 0x3e, 0x82, 0x82, 0xb4, 0xf0, + 0x9e, 0xac, 0xb7, 0x62, 0xed, 0xcb, 0x95, 0xe5, 0x60, 0xba, 0xf8, 0xd6, 0x1a, 0x7e, 0x07, 0xbb, + 0x2f, 0x88, 0x58, 0x76, 0x95, 0xf1, 0x81, 0x4c, 0xfd, 0xc7, 0x23, 0x50, 0xae, 0xae, 0x0e, 0xc8, + 0xea, 0x3f, 0x7b, 0x7e, 0x76, 0x6e, 0xa2, 0x5f, 0xe7, 0xe6, 0xda, 0xe7, 0x89, 0x89, 0xce, 0x26, + 0x26, 0xfa, 0x39, 0x31, 0xd1, 0xef, 0x89, 0x89, 0xbe, 0xfe, 0x31, 0xd7, 0xde, 0xde, 0xeb, 0x3f, + 0xe5, 0x76, 0x48, 0x9d, 0x7e, 0xd2, 0x25, 0x03, 0x22, 0x1c, 0xd6, 0xef, 0x39, 0x1e, 0x0b, 0xb9, + 0xc3, 0x68, 0x30, 0xbb, 0x8d, 0xce, 0xa8, 0xde, 0x5d, 0x57, 0xaf, 0xd2, 0xa3, 0xbf, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xdd, 0x5e, 0x3b, 0x2f, 0xd5, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
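
The regenerated `api.pb.go` hunks above all follow the same newer gogo/protobuf pattern: the old front-to-back `MarshalTo` is kept only as a shim, and the real work moves to `MarshalToSizedBuffer`, which fills a pre-sized buffer from the end towards the front (highest field number first: payload, then length varint, then tag byte), while `sovApi` switches from a shift loop to a closed-form `math_bits` expression. The sketch below is not the generated code; `tinyInfo`, `sov`, and `encodeVarint` are illustrative names, but the encoding logic mirrors the pattern visible in the diff and produces standard proto3 wire bytes.

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the regenerated sovApi: ceil(bitlen(x)/7) bytes per varint,
// with x|1 so that zero still needs one byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint writes v immediately *before* offset and returns the new,
// smaller offset — the same contract as the regenerated encodeVarintApi.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// tinyInfo stands in for a generated message with two string fields.
type tinyInfo struct {
	Name     string // field 1
	Endpoint string // field 2
}

func (m *tinyInfo) size() int {
	n := 0
	if l := len(m.Name); l > 0 {
		n += 1 + l + sov(uint64(l)) // tag + payload + length varint
	}
	if l := len(m.Endpoint); l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	return n
}

// marshalToSizedBuffer fills dAtA back to front: for each set field it
// copies the payload, then the length varint, then the tag byte.
func (m *tinyInfo) marshalToSizedBuffer(dAtA []byte) int {
	i := len(dAtA)
	if len(m.Endpoint) > 0 {
		i -= len(m.Endpoint)
		copy(dAtA[i:], m.Endpoint)
		i = encodeVarint(dAtA, i, uint64(len(m.Endpoint)))
		i--
		dAtA[i] = 0x12 // field 2, wire type 2
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarint(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2
	}
	return len(dAtA) - i
}

func (m *tinyInfo) marshal() []byte {
	dAtA := make([]byte, m.size())
	n := m.marshalToSizedBuffer(dAtA)
	return dAtA[len(dAtA)-n:]
}

func main() {
	b := (&tinyInfo{Name: "csi", Endpoint: "/var/lib/kubelet/plugins/x.sock"}).marshal()
	fmt.Printf("% x\n", b) // 0a 03 "csi" 12 1f <endpoint bytes>
}
```

Filling from the back avoids the repeated `copy`/index bookkeeping of the old forward encoder when repeated fields are emitted in reverse order, which is why the regenerated code iterates `SupportedVersions` with `iNdEx--`.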
diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto index add2aad48749..751dec2a71e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto @@ -1,7 +1,8 @@ -// To regenerate api.pb.go run hack/update-generated-pod-resources.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1; +option go_package = "k8s.io/kubelet/pkg/apis/podresources/v1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go index e4ebb4839778..bfc92132ec9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go @@ -310,29 +310,31 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 343 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0xad, 0xdb, 0x0a, 0xc8, 0xd1, 0x4a, 0xc8, 0x03, 0x84, 0xaa, 0x58, 0xc5, 0x2c, 0x5d, 0x48, - 0xd5, 0xc2, 0x06, 0x13, 0xb0, 0x20, 0x21, 0x40, 0x19, 0x60, 0xa3, 0x4a, 0x13, 0xd3, 0x46, 0xa2, - 0xb1, 0x89, 0x93, 0x8e, 0x88, 0x4f, 0xe0, 0xb3, 0x3a, 0x32, 0x32, 0xd2, 0xf0, 0x23, 0x28, 0xb6, - 0xac, 0x04, 0x5a, 0x98, 0x7c, 0x77, 0xef, 0x9d, 0xdf, 0xf3, 0x9d, 0xc1, 0xf2, 0x44, 0xe8, 0x88, - 0x98, 0x27, 0x1c, 0x6f, 0xcc, 0xfa, 0xde, 0x93, 0x98, 0x78, 0xfd, 0xd6, 0xe1, 0x38, 0x4c, 0x26, - 0xe9, 0xc8, 0xf1, 0xf9, 0xb4, 0x37, 0xe6, 0x63, 0xde, 0x53, 0x84, 0x51, 0xfa, 0xa8, 0x32, 0x95, - 0xa8, 0x48, 0x37, 0xd2, 0x5d, 0xd8, 0xb9, 0x0a, 0x65, 0x72, 0xcb, 0x03, 0x97, 0x49, 0x9e, 0xc6, - 0x3e, 0x93, 0x2e, 0x7b, 0x4e, 0x99, 0x4c, 0xe8, 0x3d, 0xd8, 0xcb, 0x90, 0x14, 0x3c, 0x92, 0x0c, - 0x9f, 0x40, 0x53, 0xf0, 0x60, 0x18, 0x1b, 0xc0, 0x46, 0x9d, 0x5a, 0x77, 0x73, 0xb0, 0xed, 0x18, - 0x1f, 0xce, 0x8f, 0xb6, 0x86, 0x28, 0x65, 0xf4, 0x05, 0x1a, 0x65, 0x14, 0x63, 0xa8, 0x47, 0xde, - 0x94, 0xd9, 0xa8, 0x83, 0xba, 0x96, 0xab, 0x62, 0xdc, 0x06, 0x2b, 0x3f, 0xa5, 0xf0, 0x7c, 0x66, - 0x57, 0x15, 0x50, 0x14, 0xf0, 0x29, 0x80, 0xcf, 0xa3, 0xc4, 0x0b, 0x23, 0x16, 0x4b, 0xbb, 0xa6, - 0xb4, 0xdb, 0x85, 0xf6, 0xb9, 0xc1, 0x0a, 0x07, 0x25, 0x3e, 0x7d, 0x00, 0xbc, 0xcc, 0x58, 0xe9, - 0xe2, 0x18, 0xd6, 0x03, 0x36, 0x0b, 0xf3, 0x07, 0x56, 0x95, 0x48, 0x6b, 0x85, 0xc8, 0x85, 0x66, - 0xb8, 0x86, 0x4a, 0xef, 0x60, 0xeb, 0x37, 0x88, 0x0f, 0xa0, 0x69, 0x86, 0x35, 0x2c, 0xc9, 0x34, - 0x4c, 0xf1, 0x3a, 0x97, 0xdb, 0x03, 0xd0, 0x77, 0x0c, 0xc3, 0x40, 0x2b, 0x5a, 0xae, 0xa5, 0x2b, - 0x97, 0x81, 0x1c, 0x30, 0xc0, 0xe5, 0xb9, 0xe5, 0xcb, 0x61, 0x31, 0xbe, 0x81, 0x7a, 0x1e, 0xe1, - 0xfd, 0xc2, 0xda, 0x1f, 0x1b, 0x6d, 0xd1, 0xff, 0x28, 0x7a, 0xb3, 0xb4, 0x72, 0xd6, 0x9e, 0x2f, - 0x08, 0xfa, 0x58, 0x90, 0xca, 0x6b, 0x46, 0xd0, 0x3c, 0x23, 0xe8, 0x3d, 0x23, 0xe8, 0x33, 0x23, - 0xe8, 0xed, 0x8b, 0x54, 0x46, 0x6b, 0xea, 0xdf, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0xce, 0xf2, 0x80, 0x7d, 0x02, 0x00, 0x00, + // 377 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x3f, 0x4f, 0xf3, 0x30, + 0x10, 
0xc6, 0xeb, 0xb6, 0x7a, 0xdf, 0x37, 0xf7, 0xb6, 0x12, 0xf2, 0x00, 0xa1, 0x2a, 0x51, 0x09, + 0x4b, 0x97, 0x26, 0x6a, 0x61, 0x40, 0x82, 0x09, 0x58, 0x10, 0x08, 0x50, 0x06, 0x90, 0x18, 0xa8, + 0xf2, 0xc7, 0xa4, 0x56, 0xdb, 0xd8, 0xc4, 0x49, 0x47, 0xc4, 0x47, 0xe0, 0x63, 0x75, 0x64, 0x64, + 0xa4, 0xe1, 0x8b, 0xa0, 0x38, 0x0a, 0x09, 0xb4, 0x30, 0xe5, 0xee, 0x9e, 0xc7, 0xf9, 0x9d, 0x7d, + 0x07, 0x8a, 0xcd, 0xa9, 0xc1, 0x43, 0x16, 0x31, 0xfc, 0x6f, 0xd6, 0xb7, 0x27, 0x7c, 0x64, 0xf7, + 0x5b, 0x3d, 0x9f, 0x46, 0xa3, 0xd8, 0x31, 0x5c, 0x36, 0x35, 0x7d, 0xe6, 0x33, 0x53, 0x1a, 0x9c, + 0xf8, 0x5e, 0x66, 0x32, 0x91, 0x51, 0x76, 0x50, 0xdf, 0x84, 0x8d, 0x73, 0x2a, 0xa2, 0x2b, 0xe6, + 0x59, 0x44, 0xb0, 0x38, 0x74, 0x89, 0xb0, 0xc8, 0x43, 0x4c, 0x44, 0xa4, 0xdf, 0x80, 0xba, 0x2c, + 0x09, 0xce, 0x02, 0x41, 0xf0, 0x01, 0x34, 0x39, 0xf3, 0x86, 0x61, 0x2e, 0xa8, 0xa8, 0x53, 0xeb, + 0xfe, 0x1f, 0xac, 0x1b, 0x79, 0x1f, 0xc6, 0x97, 0x63, 0x0d, 0x5e, 0xca, 0xf4, 0x47, 0x68, 0x94, + 0x55, 0x8c, 0xa1, 0x1e, 0xd8, 0x53, 0xa2, 0xa2, 0x0e, 0xea, 0x2a, 0x96, 0x8c, 0x71, 0x1b, 0x94, + 0xf4, 0x2b, 0xb8, 0xed, 0x12, 0xb5, 0x2a, 0x85, 0xa2, 0x80, 0x0f, 0x01, 0x5c, 0x16, 0x44, 0x36, + 0x0d, 0x48, 0x28, 0xd4, 0x9a, 0x64, 0xb7, 0x0b, 0xf6, 0x71, 0xae, 0x15, 0x1d, 0x94, 0xfc, 0xfa, + 0x1d, 0xe0, 0x65, 0xc7, 0xca, 0x2e, 0xf6, 0xe0, 0xaf, 0x47, 0x66, 0x34, 0xbd, 0x60, 0x55, 0x42, + 0x5a, 0x2b, 0x20, 0x27, 0x99, 0xc3, 0xca, 0xad, 0xfa, 0x35, 0xac, 0x7d, 0x17, 0xf1, 0x0e, 0x34, + 0xf3, 0xc7, 0x1a, 0x96, 0x30, 0x8d, 0xbc, 0x78, 0x91, 0xe2, 0xb6, 0x00, 0xb2, 0x7f, 0x0c, 0xa9, + 0x97, 0x11, 0x15, 0x4b, 0xc9, 0x2a, 0xa7, 0x9e, 0x18, 0x10, 0xc0, 0xe5, 0x77, 0x4b, 0x87, 0x43, + 0x42, 0x7c, 0x09, 0xf5, 0x34, 0xc2, 0xdb, 0x45, 0x6b, 0x3f, 0x4c, 0xb4, 0xa5, 0xff, 0x66, 0xc9, + 0x26, 0xab, 0x57, 0x8e, 0xce, 0xe6, 0x0b, 0x0d, 0xbd, 0x2e, 0xb4, 0xca, 0x53, 0xa2, 0xa1, 0x79, + 0xa2, 0xa1, 0x97, 0x44, 0x43, 0x6f, 0x89, 0x86, 0x9e, 0xdf, 0xb5, 0xca, 0x6d, 0x6f, 0xbc, 0x2f, + 0x0c, 0xca, 0xcc, 0x71, 0xec, 0x90, 0x09, 0x89, 0x4c, 0x3e, 0xf6, 0x4d, 0x9b, 0x53, 0x61, 0x72, + 0xe6, 0x7d, 0xae, 0x83, 0x99, 0x73, 0x9c, 0x3f, 0x72, 0xcd, 0x76, 0x3f, 0x02, 0x00, 0x00, 0xff, + 0xff, 0xa5, 0xe5, 0x34, 0x76, 0xac, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -822,10 +824,7 @@ func (m *ListPodResourcesRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -909,10 +908,7 @@ func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -1060,10 +1056,7 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -1179,10 +1172,7 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { @@ -1296,10 +1286,7 @@ func (m *ContainerDevices) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto index 782ac714b6a4..52e81bbf0e8c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto @@ -1,7 +1,8 @@ -// To regenerate api.pb.go run hack/update-generated-pod-resources.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1alpha1; +option go_package = "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; @@ -45,4 +46,4 @@ message ContainerResources { message ContainerDevices { string resource_name = 1; repeated string device_ids = 2; -} \ No newline at end of file +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go index 6f0a973c503f..4ff3c383735a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go @@ -49,7 +49,7 @@ func (rct realConntracker) SetMax(max int) error { if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil { return err } - klog.InfoS("Setting nf_conntrack_max", "nf_conntrack_max", max) + klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max) // Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize // when the writer process is not in the initial network namespace @@ -80,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error { return errReadOnlySysFS } // TODO: generify this and sysctl to a new sysfs.WriteInt() - klog.InfoS("Setting conntrack hashsize", "conntrack hashsize", max/4) + klog.InfoS("Setting conntrack hashsize", "conntrackHashsize", max/4) return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4) } diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index 0aff9d2ca317..0f13dc72dc38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -57,6 +57,7 @@ import ( componentbaseconfig "k8s.io/component-base/config" "k8s.io/component-base/configz" "k8s.io/component-base/logs" + logsapi "k8s.io/component-base/logs/api/v1" metricsfeatures "k8s.io/component-base/metrics/features" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/prometheus/slis" @@ -89,6 +90,7 @@ import ( func init() { utilruntime.Must(metricsfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) + logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate) } // proxyRun defines the interface to run a specified ProxyServer @@ -363,7 +365,7 @@ func (o *Options) writeConfigFile() (err error) { return err } - klog.InfoS("Wrote configuration", "WriteConfigTo", o.WriteConfigTo) + klog.InfoS("Wrote configuration", "file", o.WriteConfigTo) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go index 9836584bfa4f..7e2df8d92859 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go @@ -54,6 +54,7 @@ import ( "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/proxy/ipvs" proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -140,13 +141,15 @@ func newProxyServer( if err != nil { return nil, err } - klog.InfoS("NodeInfo", "PodCIDR", nodeInfo.Spec.PodCIDR, "PodCIDRs", nodeInfo.Spec.PodCIDRs) + klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs) } - klog.V(2).InfoS("DetectLocalMode", "LocalMode", string(detectLocalMode)) + klog.V(2).InfoS("DetectLocalMode", "localMode", string(detectLocalMode)) + primaryFamily := v1.IPv4Protocol primaryProtocol := utiliptables.ProtocolIPv4 if netutils.IsIPv6(nodeIP) { + primaryFamily = v1.IPv6Protocol primaryProtocol = utiliptables.ProtocolIPv6 } execer := exec.New() @@ -165,10 +168,21 @@ func newProxyServer( ipt[1] = iptInterface } - for _, perFamilyIpt := range ipt { - if !perFamilyIpt.Present() { - klog.V(0).InfoS("kube-proxy running in single-stack mode, this ipFamily is not supported", "ipFamily", perFamilyIpt.Protocol()) - dualStack = false + nodePortAddresses := config.NodePortAddresses + + if !ipt[0].Present() { + return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol) + } else if !ipt[1].Present() { + klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol()) + dualStack = false + + // Validate NodePortAddresses is single-stack + npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses) + secondaryFamily := proxyutil.OtherIPFamily(primaryFamily) + badAddrs := npaByFamily[secondaryFamily] + if len(badAddrs) > 0 { + klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs) + 
nodePortAddresses = npaByFamily[primaryFamily] } } @@ -204,7 +218,7 @@ func newProxyServer( nodeIPTuple(config.BindAddress), recorder, healthzServer, - config.NodePortAddresses, + nodePortAddresses, ) } else { // Create a single-stack proxier if and only if the node does not support dual-stack (i.e, no iptables support). @@ -216,6 +230,7 @@ func newProxyServer( // TODO this has side effects that should only happen when Run() is invoked. proxier, err = iptables.NewProxier( + primaryFamily, iptInterface, utilsysctl.New(), execer, @@ -229,7 +244,7 @@ func newProxyServer( nodeIP, recorder, healthzServer, - config.NodePortAddresses, + nodePortAddresses, ) } @@ -240,10 +255,10 @@ func newProxyServer( } else if proxyMode == proxyconfigapi.ProxyModeIPVS { kernelHandler := ipvs.NewLinuxKernelHandler() ipsetInterface = utilipset.New(execer) - if err := ipvs.CanUseIPVSProxier(kernelHandler, ipsetInterface, config.IPVS.Scheduler); err != nil { + ipvsInterface = utilipvs.New() + if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil { return nil, fmt.Errorf("can't use the IPVS proxier: %v", err) } - ipvsInterface = utilipvs.New() klog.InfoS("Using ipvs Proxier") if dualStack { @@ -279,7 +294,7 @@ func newProxyServer( recorder, healthzServer, config.IPVS.Scheduler, - config.NodePortAddresses, + nodePortAddresses, kernelHandler, ) } else { @@ -290,6 +305,7 @@ func newProxyServer( } proxier, err = ipvs.NewProxier( + primaryFamily, iptInterface, ipvsInterface, ipsetInterface, @@ -310,7 +326,7 @@ func newProxyServer( recorder, healthzServer, config.IPVS.Scheduler, - config.NodePortAddresses, + nodePortAddresses, kernelHandler, ) } @@ -406,7 +422,7 @@ func getDetectLocalMode(config *proxyconfigapi.KubeProxyConfiguration) (proxycon if strings.TrimSpace(mode.String()) != "" { return mode, fmt.Errorf("unknown detect-local-mode: %v", mode) } - klog.V(4).InfoS("Defaulting detect-local-mode", "LocalModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR)) + klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR)) return proxyconfigapi.LocalModeClusterCIDR, nil } } @@ -436,7 +452,7 @@ func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.Kube } return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix) } - klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode)) + klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode)) return proxyutiliptables.NewNoOpLocalDetector(), nil } @@ -462,7 +478,7 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy } if len(strings.TrimSpace(clusterCIDRs[1])) == 0 { - klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6") + klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6") } else { localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1]) } @@ -500,9 +516,9 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy } return localDetectors, err default: - klog.InfoS("Unknown detect-local-mode", "detect-local-mode", mode) + klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode) } - klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode)) + klog.InfoS("Defaulting to no-op detect-local", 
"detectLocalMode", string(mode)) return localDetectors, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go index 067ad97371ca..4ca74badc1b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go @@ -106,7 +106,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string proxyMode := proxyconfigapi.ProxyModeKernelspace dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{}) if dualStackMode { - klog.V(0).InfoS("Creating dualStackProxier for Windows kernel.") + klog.InfoS("Creating dualStackProxier for Windows kernel.") proxier, err = winkernel.NewDualStackProxier( config.IPTables.SyncPeriod.Duration, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/init_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/init_windows.go index 1696f0431b67..f089df774972 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/init_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/init_windows.go @@ -23,9 +23,9 @@ import ( "fmt" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/windows" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/windows/service" ) @@ -57,7 +57,7 @@ func getPriorityValue(priorityClassName string) uint32 { func createWindowsJobObject(pc uint32) (windows.Handle, error) { job, err := windows.CreateJobObject(nil, nil) if err != nil { - return windows.InvalidHandle, errors.Wrap(err, "windows.CreateJobObject failed") + return windows.InvalidHandle, fmt.Errorf("windows.CreateJobObject failed: %w", err) } limitInfo := windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{ LimitFlags: windows.JOB_OBJECT_LIMIT_PRIORITY_CLASS, @@ -68,7 +68,7 @@ func createWindowsJobObject(pc uint32) (windows.Handle, error) { windows.JobObjectBasicLimitInformation, uintptr(unsafe.Pointer(&limitInfo)), uint32(unsafe.Sizeof(limitInfo))); err != nil { - return windows.InvalidHandle, errors.Wrap(err, "windows.SetInformationJobObject failed") + return windows.InvalidHandle, fmt.Errorf("windows.SetInformationJobObject failed: %w", err) } return job, nil } @@ -85,7 +85,7 @@ func initForOS(windowsService bool, windowsPriorityClass string) error { return err } if err := windows.AssignProcessToJobObject(job, windows.CurrentProcess()); err != nil { - return errors.Wrap(err, "windows.AssignProcessToJobObject failed") + return fmt.Errorf("windows.AssignProcessToJobObject failed: %w", err) } if windowsService { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go index f184a5c2cf08..8be25ccdc4a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go @@ -98,10 +98,6 @@ type KubeletFlags struct { // Source: https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities WindowsPriorityClass string - // remoteRuntimeEndpoint is the endpoint of remote runtime service - RemoteRuntimeEndpoint string - // remoteImageEndpoint is the endpoint of remote image service - RemoteImageEndpoint string // experimentalMounterPath is the path of mounter binary. 
Leave empty to use the default mount path ExperimentalMounterPath string // This flag, if set, will avoid including `EvictionHard` limits while computing Node Allocatable. @@ -193,13 +189,6 @@ func ValidateKubeletFlags(f *KubeletFlags) error { return fmt.Errorf("unsupported CRI runtime: %q, only %q is currently supported", f.ContainerRuntime, kubetypes.RemoteContainerRuntime) } - // Note: maybe we can test it for being a valid socket address as an additional improvement. - // The only problem with it will be that some setups may not specify 'unix://' prefix. - // So just check empty for back compat. - if f.RemoteRuntimeEndpoint == "" { - return fmt.Errorf("the container runtime endpoint address was not specified or empty, use --container-runtime-endpoint to set") - } - return nil } @@ -323,9 +312,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") - fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "The endpoint of remote runtime service. Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'") - fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "The endpoint of remote image service. If not specified, it will be the same with --container-runtime-endpoint by default. Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'") - // EXPERIMENTAL FLAGS bindableNodeLabels := cliflag.ConfigurationMap(f.NodeLabels) fs.Var(&bindableNodeLabels, "node-labels", fmt.Sprintf(" Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','. Labels in the 'kubernetes.io' namespace must begin with an allowed prefix (%s) or be in the specifically allowed set (%s)", strings.Join(kubeletapis.KubeletLabelNamespaces(), ", "), strings.Join(kubeletapis.KubeletLabels(), ", "))) @@ -399,6 +385,10 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.Int32Var(&c.Port, "port", c.Port, "The port for the Kubelet to serve on.") fs.Int32Var(&c.ReadOnlyPort, "read-only-port", c.ReadOnlyPort, "The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable)") + // runtime flags + fs.StringVar(&c.ContainerRuntimeEndpoint, "container-runtime-endpoint", c.ContainerRuntimeEndpoint, "The endpoint of container runtime service. Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'") + fs.StringVar(&c.ImageServiceEndpoint, "image-service-endpoint", c.ImageServiceEndpoint, "The endpoint of container image service. If not specified, it will be the same with --container-runtime-endpoint by default. Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'") + // Authentication fs.BoolVar(&c.Authentication.Anonymous.Enabled, "anonymous-auth", c.Authentication.Anonymous.Enabled, ""+ "Enables anonymous requests to the Kubelet server. 
Requests that are not rejected by another "+ @@ -439,7 +429,7 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.StringVar(&c.TLSMinVersion, "tls-min-version", c.TLSMinVersion, "Minimum TLS version supported. "+ "Possible values: "+strings.Join(tlsPossibleVersions, ", ")) - fs.BoolVar(&c.RotateCertificates, "rotate-certificates", c.RotateCertificates, " Auto rotate the kubelet client certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches.") + fs.BoolVar(&c.RotateCertificates, "rotate-certificates", c.RotateCertificates, "Auto rotate the kubelet client certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches.") fs.Int32Var(&c.RegistryPullQPS, "registry-qps", c.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited.") fs.Int32Var(&c.RegistryBurst, "registry-burst", c.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index 3899df80ee3f..12c641048cda 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -465,7 +465,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) } func getReservedCPUs(machineInfo *cadvisorapi.MachineInfo, cpus string) (cpuset.CPUSet, error) { - emptyCPUSet := cpuset.NewCPUSet() + emptyCPUSet := cpuset.New() if cpus == "" { return emptyCPUSet, nil @@ -644,8 +644,8 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend } if kubeDeps.CAdvisorInterface == nil { - imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.RemoteRuntimeEndpoint) - kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.RemoteRuntimeEndpoint), s.LocalStorageCapacityIsolation) + imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntimeEndpoint) + kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntimeEndpoint), s.LocalStorageCapacityIsolation) if err != nil { return err } @@ -683,11 +683,11 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend kubeReserved, err := parseResourceList(s.KubeReserved) if err != nil { - return err + return fmt.Errorf("--kube-reserved value failed to parse: %w", err) } systemReserved, err := parseResourceList(s.SystemReserved) if err != nil { - return err + return fmt.Errorf("--system-reserved value failed to parse: %w", err) } var hardEvictionThresholds []evictionapi.Threshold // If the user requested to ignore eviction thresholds, then do not set valid values for hardEvictionThresholds here. 
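
A recurring theme in the kubelet hunks in this part of the patch is error handling: `github.com/pkg/errors.Wrap` is replaced by `fmt.Errorf` with the `%w` verb, and bare `return err` sites gain context naming the flag or value that failed (`--kube-reserved`, `--system-reserved`, `--qos-reserved`). The sketch below only illustrates that wrapping pattern under stated assumptions: `strconv.Atoi` stands in for `resource.ParseQuantity`, and the function body is not the kubelet's actual implementation.

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseResourceList shows the wrapping style used in the patch: the message
// carries the offending key and value, while %w keeps the underlying error
// reachable through errors.Is / errors.Unwrap.
func parseResourceList(m map[string]string) (map[string]int, error) {
	out := map[string]int{}
	for k, v := range m {
		q, err := strconv.Atoi(v) // stand-in for resource.ParseQuantity
		if err != nil {
			return nil, fmt.Errorf("failed to parse quantity %q for %q resource: %w", v, k, err)
		}
		out[k] = q
	}
	return out, nil
}

func main() {
	_, err := parseResourceList(map[string]string{"cpu": "100x"})
	fmt.Println(err)
	// The cause is still inspectable, which pkg/errors offered via Cause();
	// with %w the standard library covers the same need.
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true
}
```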
@@ -699,7 +699,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend } experimentalQOSReserved, err := cm.ParseQOSReserved(s.QOSReserved) if err != nil { - return err + return fmt.Errorf("--qos-reserved value failed to parse: %w", err) } var cpuManagerPolicyOptions map[string]string @@ -775,7 +775,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend klog.InfoS("Failed to ApplyOOMScoreAdj", "err", err) } - err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration, kubeDeps, s.RemoteRuntimeEndpoint, s.RemoteImageEndpoint) + err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration, kubeDeps) if err != nil { return err } @@ -1265,7 +1265,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) { case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage, pidlimit.PIDs: q, err := resource.ParseQuantity(v) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse quantity %q for %q resource: %w", v, k, err) } if q.Sign() == -1 { return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/util.go index a4262edad243..c73d96a6c4d0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -74,7 +74,7 @@ func RequestsOnlyLocalTraffic(service *api.Service) bool { return false } - return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyTypeLocal + return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyLocal } // NeedsHealthCheck checks if service needs health check. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go new file mode 100644 index 000000000000..6d0ff8e66515 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go @@ -0,0 +1,95 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "net/netip" + + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +func GetWarningsForService(service, oldService *api.Service) []string { + if service == nil { + return nil + } + var warnings []string + + if helper.IsServiceIPSet(service) { + for i, clusterIP := range service.Spec.ClusterIPs { + warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...) + } + } + + for i, externalIP := range service.Spec.ExternalIPs { + warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...) 
+ } + + if len(service.Spec.LoadBalancerIP) > 0 { + warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...) + } + + for i, cidr := range service.Spec.LoadBalancerSourceRanges { + warnings = append(warnings, getWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...) + } + + return warnings +} + +func getWarningsForIP(fieldPath *field.Path, address string) []string { + // IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17 + // This will also warn about possible future changes on the golang std library + // xref: https://issues.k8s.io/108074 + ip, err := netip.ParseAddr(address) + if err != nil { + return []string{fmt.Sprintf("%s: IP address was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)} + } + // A Recommendation for IPv6 Address Text Representation + // + // "All of the above examples represent the same IPv6 address. This + // flexibility has caused many problems for operators, systems + // engineers, and customers. + // ..." + // https://datatracker.ietf.org/doc/rfc5952/ + if ip.Is6() && ip.String() != address { + return []string{fmt.Sprintf("%s: IPv6 address %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, address, ip.String())} + } + return []string{} +} + +func getWarningsForCIDR(fieldPath *field.Path, cidr string) []string { + // IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17 + // This will also warn about possible future changes on the golang std library + // xref: https://issues.k8s.io/108074 + prefix, err := netip.ParsePrefix(cidr) + if err != nil { + return []string{fmt.Sprintf("%s: IP prefix was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)} + } + // A Recommendation for IPv6 Address Text Representation + // + // "All of the above examples represent the same IPv6 address. This + // flexibility has caused many problems for operators, systems + // engineers, and customers. + // ..." + // https://datatracker.ietf.org/doc/rfc5952/ + if prefix.Addr().Is6() && prefix.String() != cidr { + return []string{fmt.Sprintf("%s: IPv6 prefix %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, cidr, prefix.String())} + } + return []string{} +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go index a88ead0d1e1e..683b8ed1f7d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go @@ -73,7 +73,7 @@ func ExternalPolicyLocal(service *v1.Service) bool { service.Spec.Type != v1.ServiceTypeNodePort { return false } - return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal + return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal } // InternalPolicyLocal checks if service has ITP = Local. 
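
The new `pkg/api/service/warnings.go` above leans on `net/netip`, whose parser rejects IPv4 octets with leading zeros (CVE-2021-29923) and canonicalizes IPv6 per RFC 5952, so a simple `ip.String() != address` comparison detects non-canonical input. A minimal sketch of that check follows; `checkIP` is an illustrative helper name, but the logic mirrors `getWarningsForIP` from the hunk above.

```go
package main

import (
	"fmt"
	"net/netip"
)

// checkIP reports addresses that older parsers tolerated but that either
// fail netip.ParseAddr or are IPv6 addresses not in RFC 5952 canonical form.
func checkIP(address string) string {
	ip, err := netip.ParseAddr(address)
	if err != nil {
		return fmt.Sprintf("rejected by netip: %v", err)
	}
	if ip.Is6() && ip.String() != address {
		return fmt.Sprintf("accepted, but canonical form is %q", ip.String())
	}
	return "canonical"
}

func main() {
	for _, a := range []string{
		"10.0.0.1",              // fine
		"010.000.000.1",         // IPv4 with leading zeros: parse error
		"2001:0DB8:0:0:0:0:0:1", // valid IPv6, but not the RFC 5952 form
		"2001:db8::1",           // canonical
	} {
		fmt.Printf("%-24s %s\n", a, checkIP(a))
	}
}
```

The warning text in the patch deliberately says "accepted, but will be invalid in a future Kubernetes release": these values still pass validation today, they only generate API warnings.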
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go index 7fb8160c1a36..027ac8923b1a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go @@ -39,10 +39,10 @@ import ( // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. func ValidateStatefulSetName(name string, prefix bool) []string { - // TODO: Validate that there's name for the suffix inserted by the pods. + // TODO: Validate that there's room for the suffix inserted by the pods. // Currently this is just "-index". In the future we may allow a user // specified list of suffixes and we need to validate the longest one. - return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) + return apimachineryvalidation.NameIsDNSLabel(name, prefix) } // ValidatePodTemplateSpecForStatefulSet validates the given template and ensures that it is in accordance with the desired selector. @@ -173,10 +173,14 @@ func ValidateStatefulSet(statefulSet *apps.StatefulSet, opts apivalidation.PodVa // ValidateStatefulSetUpdate tests if required fields in the StatefulSet are set. func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet, opts apivalidation.PodValidationOptions) field.ErrorList { - // first, validate that the new statefulset is valid - allErrs := ValidateStatefulSet(statefulSet, opts) - - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))...) + // First, validate that the new statefulset is valid. Don't call + // ValidateStatefulSet() because we don't want to revalidate the name on + // update. This is important here because we used to allow DNS subdomain + // for name, but that can't actually create pods. The only reasonable + // thing to do it delete such an instance, but if there is a finalizer, it + // would need to pass update validation. Name can't change anyway. + allErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"), opts)...) // statefulset updates aren't super common and general updates are likely to be touching spec, so we'll do this // deep copy right away. This avoids mutating our inputs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go index cd3dd9656a23..4b9b865b218c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go @@ -158,6 +158,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. 
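For context on the In/NotIn comment being reflowed in the batch/types.go hunk above, a Job podFailurePolicy rule using the "In" operator could look like the following sketch. It is written against the external k8s.io/api/batch/v1 types rather than the internal API touched by this patch, and is shown purely as an illustration:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// Fail the Job when any container (not limited by containerName)
	// exits with code 42 -- the "In" semantics described in the comment.
	rule := batchv1.PodFailurePolicyRule{
		Action: batchv1.PodFailurePolicyActionFailJob,
		OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
			Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
			Values:   []int32{42},
		},
	}
	fmt.Printf("%+v\n", rule)
}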
@@ -194,6 +195,7 @@ type PodFailurePolicyOnPodConditionsPattern struct { type PodFailurePolicyRule struct { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -237,7 +239,7 @@ type JobSpec struct { Parallelism *int32 // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -305,7 +307,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -330,7 +332,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -387,7 +389,7 @@ type JobStatus struct { // +optional Failed int32 - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -397,15 +399,16 @@ type JobStatus struct { // +optional CompletedIndexes string - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the corresponding array in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the array while increasing the corresponding + // + // 1. Add the pod UID to the corresponding array in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the array while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -417,12 +420,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. 
// +listType=set // +optional Failed []types.UID @@ -524,6 +527,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index fbbecb00d58f..45514929eba4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -1741,7 +1741,6 @@ type CSIPersistentVolumeSource struct { // ControllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -3781,33 +3780,33 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes the endpoint-selection policy for +// ServiceInternalTrafficPolicy describes the endpoint-selection policy for // traffic sent to the ClusterIP. -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same // node as the traffic was received on (dropping the traffic if there are no // local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// ServiceExternalTrafficPolicyType describes the endpoint-selection policy for +// ServiceExternalTrafficPolicy describes the endpoint-selection policy for // traffic to external service entrypoints (NodePorts, ExternalIPs, and // LoadBalancer IPs). -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by // routing only to endpoints on the same node as the traffic was received on // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" ) // These are the valid conditions of a service. @@ -4013,7 +4012,7 @@ type ServiceSpec struct { // a NodePort from within the cluster may need to take traffic policy into account // when picking a node. 
// +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType + ExternalTrafficPolicy ServiceExternalTrafficPolicy // healthCheckNodePort specifies the healthcheck nodePort for the service. // If not specified, HealthCheckNodePort is created by the service api @@ -4064,7 +4063,7 @@ type ServiceSpec struct { // "Cluster", uses the standard behavior of routing to all endpoints evenly // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType + InternalTrafficPolicy *ServiceInternalTrafficPolicy } // ServicePort represents the port on which the service is exposed @@ -4208,9 +4207,8 @@ type EndpointSubset struct { // EndpointAddress is a tuple that describes single IP address. type EndpointAddress struct { // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string // Optional: Hostname of this endpoint // Meant to be used by DNS servers etc. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index b42221691fa9..06af99974e8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -125,7 +125,7 @@ func SetDefaults_Service(obj *v1.Service) { if (obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer) && obj.Spec.ExternalTrafficPolicy == "" { - obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster + obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster } if obj.Spec.InternalTrafficPolicy == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go index 4f7ca42e40fe..8f2786b50dfd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" @@ -60,9 +60,9 @@ func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath if exists { // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
if quantity.Cmp(limitQuantity) != 0 && !v1helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String()))) } else if quantity.Cmp(limitQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String()))) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index c27a86783ba2..fcea1fe5db04 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -7850,7 +7850,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor out.LoadBalancerIP = in.LoadBalancerIP out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) out.ExternalName = in.ExternalName - out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy) out.HealthCheckNodePort = in.HealthCheckNodePort out.PublishNotReadyAddresses = in.PublishNotReadyAddresses out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) @@ -7858,7 +7858,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor out.IPFamilyPolicy = (*core.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy)) out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts)) out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass)) - out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy)) + out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy)) return nil } @@ -7881,12 +7881,12 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy) out.HealthCheckNodePort = in.HealthCheckNodePort out.PublishNotReadyAddresses = in.PublishNotReadyAddresses out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts)) out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass)) - out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy)) + out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy)) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index d53339863b35..946147156c7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -1531,14 +1531,12 @@ func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistent // validatePVSecretReference check whether provided SecretReference object is valid in terms of secret name and namespace. -func validatePVSecretReference(secretRef *core.SecretReference, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList { +func validatePVSecretReference(secretRef *core.SecretReference, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList if len(secretRef.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if allowDNSSubDomainSecretName { - allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...) } else { - allErrs = append(allErrs, ValidateDNS1123Label(secretRef.Name, fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...) } if len(secretRef.Namespace) == 0 { @@ -1567,7 +1565,7 @@ func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorLi return allErrs } -func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList { +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...) @@ -1576,16 +1574,16 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allo allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) } if csi.ControllerPublishSecretRef != nil { - allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerPublishSecretRef"))...) + allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, fldPath.Child("controllerPublishSecretRef"))...) } if csi.ControllerExpandSecretRef != nil { - allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerExpandSecretRef"))...) + allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, fldPath.Child("controllerExpandSecretRef"))...) } if csi.NodePublishSecretRef != nil { - allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodePublishSecretRef"))...) + allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, fldPath.Child("nodePublishSecretRef"))...) } if csi.NodeExpandSecretRef != nil { - allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodeExpandSecretRef"))...) + allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, fldPath.Child("nodeExpandSecretRef"))...) 
} return allErrs } @@ -1647,8 +1645,6 @@ var allowedTemplateObjectMetaFields = map[string]bool{ type PersistentVolumeSpecValidationOptions struct { // Allow spec to contain the "ReadWiteOncePod" access mode AllowReadWriteOncePod bool - // Allow the secretRef Name field to be of DNSSubDomain Format - AllowDNSSubDomainSecretName bool } // ValidatePersistentVolumeName checks that a name is appropriate for a @@ -1663,8 +1659,7 @@ var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), st func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions { opts := PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), - AllowDNSSubDomainSecretName: false, + AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), } if oldPv == nil { // If there's no old PV, use the options based solely on feature enablement @@ -1674,21 +1669,9 @@ func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) Pers // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object opts.AllowReadWriteOncePod = true } - if oldCSI := oldPv.Spec.CSI; oldCSI != nil { - opts.AllowDNSSubDomainSecretName = - secretRefRequiresSubdomainSecretName(oldCSI.ControllerExpandSecretRef) || - secretRefRequiresSubdomainSecretName(oldCSI.ControllerPublishSecretRef) || - secretRefRequiresSubdomainSecretName(oldCSI.NodeStageSecretRef) || - secretRefRequiresSubdomainSecretName(oldCSI.NodePublishSecretRef) - } return opts } -func secretRefRequiresSubdomainSecretName(secretRef *core.SecretReference) bool { - // ref and name were specified and name didn't fit within label validation - return secretRef != nil && len(secretRef.Name) > 0 && len(validation.IsDNS1123Label(secretRef.Name)) > 0 -} - func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList { allErrs := field.ErrorList{} @@ -1943,7 +1926,7 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type")) } else { numVolumes++ - allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, opts.AllowDNSSubDomainSecretName, fldPath.Child("csi"))...) + allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, fldPath.Child("csi"))...) } } @@ -3654,6 +3637,8 @@ type PodValidationOptions struct { AllowIndivisibleHugePagesValues bool // Allow more DNSSearchPaths and longer DNSSearchListChars AllowExpandedDNSConfig bool + // Allow invalid topologySpreadConstraint labelSelector for backward compatibility + AllowInvalidTopologySpreadConstraintLabelSelector bool } // validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set, @@ -3759,7 +3744,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...) allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...) allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...) 
- allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...) + allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"), opts)...) allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...) allErrs = append(allErrs, validateHostUsers(spec, fldPath)...) if len(spec.ServiceAccountName) > 0 { @@ -4688,6 +4673,11 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodVali allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...) + // static pods don't support ephemeral containers #113935 + if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok { + return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods do not support ephemeral containers")} + } + // Part 2: Validate that the changes between oldPod.Spec.EphemeralContainers and // newPod.Spec.EphemeralContainers are allowed. // @@ -4744,7 +4734,7 @@ var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityCli var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) -var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyTypeLocal)) +var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyLocal)) var supportedServiceIPFamily = sets.NewString(string(core.IPv4Protocol), string(core.IPv6Protocol)) var supportedServiceIPFamilyPolicy = sets.NewString(string(core.IPFamilyPolicySingleStack), string(core.IPFamilyPolicyPreferDualStack), string(core.IPFamilyPolicyRequireDualStack)) @@ -4976,8 +4966,8 @@ func needsExternalTrafficPolicy(svc *core.Service) bool { } var validExternalTrafficPolicies = sets.NewString( - string(core.ServiceExternalTrafficPolicyTypeCluster), - string(core.ServiceExternalTrafficPolicyTypeLocal)) + string(core.ServiceExternalTrafficPolicyCluster), + string(core.ServiceExternalTrafficPolicyLocal)) func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} @@ -5812,7 +5802,6 @@ func ValidateSecret(secret *core.Secret) field.ErrorList { if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists { allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), "")) } - // TODO: Verify that the key matches the cert. default: // no-op } @@ -5947,9 +5936,9 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl if exists { // For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String()))) } else if quantity.Cmp(limitQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String()))) } } else if !helper.IsOvercommitAllowed(resourceName) { allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources")) @@ -6742,7 +6731,7 @@ var ( ) // validateTopologySpreadConstraints validates given TopologySpreadConstraints. -func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path) field.ErrorList { +func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} for i, constraint := range constraints { @@ -6768,6 +6757,9 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai allErrs = append(allErrs, err) } allErrs = append(allErrs, validateMatchLabelKeys(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...) + if !opts.AllowInvalidTopologySpreadConstraintLabelSelector { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...) 
+ } } return allErrs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 857843371a36..919cf338f4e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -5502,7 +5502,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 4abb1afa233c..e82911b2a158 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -22,7 +22,6 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/policy" ) // GroupName is the group name use in this package @@ -60,8 +59,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &networking.IngressList{}, &apps.ReplicaSet{}, &apps.ReplicaSetList{}, - &policy.PodSecurityPolicy{}, - &policy.PodSecurityPolicyList{}, &autoscaling.Scale{}, &networking.NetworkPolicy{}, &networking.NetworkPolicyList{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go index 7edd4cb34d76..864173b870bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go @@ -24,17 +24,18 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// NetworkPolicy describes what network traffic is allowed for a set of Pods +// NetworkPolicy describes what network traffic is allowed for a set of pods type NetworkPolicy struct { metav1.TypeMeta + // +optional metav1.ObjectMeta - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NetworkPolicyStatus @@ -53,16 +54,16 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. 
In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -71,8 +72,8 @@ type NetworkPolicySpec struct { // +optional Ingress []NetworkPolicyIngressRule - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -81,15 +82,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType @@ -98,15 +99,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. 
If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -119,7 +120,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows @@ -127,7 +128,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -138,19 +139,19 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *api.Protocol - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -162,37 +163,38 @@ type NetworkPolicyPort struct { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. 
type IPBlock struct { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string - // Except is a slice of CIDRs that should not be included within an IP Block + + // except is a list of CIDRs that should not be included within the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional Except []string } // NetworkPolicyPeer describes a peer to allow traffic to/from. type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by namespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock @@ -228,9 +230,9 @@ const ( NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" ) -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describes the state of the NetworkPolicy. Conditions []metav1.Condition } @@ -239,6 +241,7 @@ type NetworkPolicyStatus struct { // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta + // +optional metav1.ListMeta @@ -253,17 +256,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta + // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus @@ -274,18 +278,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -298,23 +303,23 @@ type IngressSpec struct { // +optional IngressClassName *string - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional @@ -330,9 +335,10 @@ type IngressSpec struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta + metav1.ObjectMeta - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec @@ -340,15 +346,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. 
type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -367,20 +373,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is - // not specified, the specified Kind must be in the core API group. For any - // other third-party types, APIGroup is required. + // apiGroup is the group for the resource being referenced. If apiGroup is + // not specified, the specified kind must be in the core API group. For any + // other third-party types, apiGroup is required. // +optional APIGroup *string - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -392,71 +402,73 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta + // Standard object's metadata. // +optional metav1.ListMeta - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus } // IngressLoadBalancerStatus represents the status of a load-balancer type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +optional Ports []IngressPortStatus } // IngressPortStatus represents the error condition of an ingress port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. Protocol api.Protocol - // Error indicates a problem on this port. + // error indicates a problem on this port. // The format of the error must comply with the following rules: // - Kubernetes-defined error values use CamelCase names // - Provider-specific error values must follow label-name style (e.g. @@ -469,7 +481,7 @@ type IngressPortStatus struct { // host match, then routed to the backend associated with the matching // IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -482,17 +494,18 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. 
If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // Requests will be matched against the host field in the following way: + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string + // IngressRuleValue represents a rule to route requests for this // IngressRule. If unspecified, the rule defaults to a http catch-all. // Whether that sends just traffic matching the host to the default backend @@ -524,7 +537,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath // TODO: Consider adding fields for ingress-type specific global @@ -564,32 +577,32 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of Exact, Prefix, or ImplementationSpecific. Implementations are // required to support all path types. // +optional PathType *PathType - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -599,24 +612,24 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. 
+ // name is the name of the port on the Service. // This must be an IANA_SVC_NAME (following RFC6335). // This is a mutually exclusive setting with "Number". // +optional Name string - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 @@ -637,6 +650,7 @@ type ServiceBackendPort struct { // selector matches the Node may be used. type ClusterCIDR struct { metav1.TypeMeta + metav1.ObjectMeta Spec ClusterCIDRSpec @@ -644,13 +658,13 @@ type ClusterCIDR struct { // ClusterCIDRSpec defines the desired state of ClusterCIDR. type ClusterCIDRSpec struct { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional NodeSelector *api.NodeSelector - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -660,14 +674,14 @@ type ClusterCIDRSpec struct { // +required PerNodeHostBits int32 - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv4 string - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv6 string @@ -682,6 +696,6 @@ type ClusterCIDRList struct { // +optional metav1.ListMeta - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. Items []ClusterCIDR } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go index 5a3d533a7f14..0f86126dc855 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 @@ -63,12 +63,12 @@ type PriorityClass struct { // +optional GlobalDefault bool - // Description is an arbitrary string that usually provides guidelines on + // description is an arbitrary string that usually provides guidelines on // when this priority class should be used. // +optional Description string - // PreemptionPolicy it the Policy for preempting pods with lower priority. + // preemptionPolicy it the Policy for preempting pods with lower priority. // This field is beta-level. 
// +optional PreemptionPolicy *core.PreemptionPolicy @@ -84,6 +84,6 @@ type PriorityClassList struct { // +optional metav1.ListMeta - // Items is the list of PriorityClasses. + // items is the list of PriorityClasses. Items []PriorityClass } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go index 0b3a397f8f88..740c98d32a88 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go @@ -143,9 +143,8 @@ type PodControllerRefManager struct { // If CanAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once CanAdopt() is called, it will not be called again by the same -// -// PodControllerRefManager instance. Create a new instance if it makes -// sense to check CanAdopt() again (e.g. in a different sync pass). +// PodControllerRefManager instance. Create a new instance if it makes +// sense to check CanAdopt() again (e.g. in a different sync pass). func NewPodControllerRefManager( podControl PodControlInterface, controller metav1.Object, @@ -284,9 +283,8 @@ type ReplicaSetControllerRefManager struct { // If CanAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once CanAdopt() is called, it will not be called again by the same -// -// ReplicaSetControllerRefManager instance. Create a new instance if it -// makes sense to check CanAdopt() again (e.g. in a different sync pass). +// ReplicaSetControllerRefManager instance. Create a new instance if it +// makes sense to check CanAdopt() again (e.g. in a different sync pass). func NewReplicaSetControllerRefManager( rsControl RSControlInterface, controller metav1.Object, @@ -423,9 +421,8 @@ type ControllerRevisionControllerRefManager struct { // If canAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once canAdopt() is called, it will not be called again by the same -// -// ControllerRevisionControllerRefManager instance. Create a new instance if it -// makes sense to check canAdopt() again (e.g. in a different sync pass). +// ControllerRevisionControllerRefManager instance. Create a new instance if it +// makes sense to check canAdopt() again (e.g. in a different sync pass). func NewControllerRevisionControllerRefManager( crControl ControllerRevisionControlInterface, controller metav1.Object, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 5c897da432dc..2f4bc44be019 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -1039,12 +1039,12 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName var oldNode *v1.Node // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
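The controller_utils.go hunks that follow (AddOrUpdateTaintOnNode, RemoveTaintOffNode, AddOrUpdateLabelsOnNode) all collapse the duplicated branch into a single Get call whose options differ only on the first attempt. A minimal standalone sketch of the same pattern; the getNode helper is illustrative and not part of the patch.

package nodeget

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// getNode reads a Node, preferring the cheaper apiserver watch-cache read on
// the first try (ResourceVersion "0") and falling back to a quorum read from
// etcd on retries so the data is guaranteed to be fresh.
func getNode(ctx context.Context, c clientset.Interface, nodeName string, firstTry bool) (*v1.Node, error) {
	option := metav1.GetOptions{}
	if firstTry {
		option.ResourceVersion = "0"
	}
	return c.CoreV1().Nodes().Get(ctx, nodeName, option)
}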
+ option := metav1.GetOptions{} if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"}) + option.ResourceVersion = "0" firstTry = false - } else { - oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option) if err != nil { return err } @@ -1096,12 +1096,12 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str var oldNode *v1.Node // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. + option := metav1.GetOptions{} if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"}) + option.ResourceVersion = "0" firstTry = false - } else { - oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option) if err != nil { return err } @@ -1178,12 +1178,12 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la var node *v1.Node // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. + option := metav1.GetOptions{} if firstTry { - node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + option.ResourceVersion = "0" firstTry = false - } else { - node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, option) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go index 1263214d329c..1b9c3d2598af 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go @@ -887,6 +887,32 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode( return nodesNeedingDaemonPods, podsToDelete } +func (dsc *DaemonSetsController) updateDaemonSet(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash, key string, old []*apps.ControllerRevision) error { + err := dsc.manage(ctx, ds, nodeList, hash) + if err != nil { + return err + } + + // Process rolling updates if we're ready. + if dsc.expectations.SatisfiedExpectations(key) { + switch ds.Spec.UpdateStrategy.Type { + case apps.OnDeleteDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: + err = dsc.rollingUpdate(ctx, ds, nodeList, hash) + } + if err != nil { + return err + } + } + + err = dsc.cleanupHistory(ctx, ds, old) + if err != nil { + return fmt.Errorf("failed to clean up revisions of DaemonSet: %w", err) + } + + return nil +} + // manage manages the scheduling and running of Pods of ds on nodes. // After figuring out which nodes should run a Pod of ds but not yet running one and // which nodes should not run a Pod of ds but currently running one, it calls function @@ -1104,28 +1130,30 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds * if shouldRun { desiredNumberScheduled++ - if scheduled { - currentNumberScheduled++ - // Sort the daemon pods by creation time, so that the oldest is first. 
- daemonPods, _ := nodeToDaemonPods[node.Name] - sort.Sort(podByCreationTimestampAndPhase(daemonPods)) - pod := daemonPods[0] - if podutil.IsPodReady(pod) { - numberReady++ - if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) { - numberAvailable++ - } - } - // If the returned error is not nil we have a parse error. - // The controller handles this via the hash. - generation, err := util.GetTemplateGeneration(ds) - if err != nil { - generation = nil - } - if util.IsPodUpdated(pod, hash, generation) { - updatedNumberScheduled++ + if !scheduled { + continue + } + + currentNumberScheduled++ + // Sort the daemon pods by creation time, so that the oldest is first. + daemonPods, _ := nodeToDaemonPods[node.Name] + sort.Sort(podByCreationTimestampAndPhase(daemonPods)) + pod := daemonPods[0] + if podutil.IsPodReady(pod) { + numberReady++ + if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) { + numberAvailable++ } } + // If the returned error is not nil we have a parse error. + // The controller handles this via the hash. + generation, err := util.GetTemplateGeneration(ds) + if err != nil { + generation = nil + } + if util.IsPodUpdated(pod, hash, generation) { + updatedNumberScheduled++ + } } else { if scheduled { numberMisscheduled++ @@ -1136,7 +1164,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds * err = storeDaemonSetStatus(ctx, dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen) if err != nil { - return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err) + return fmt.Errorf("error storing status for daemon set %#v: %w", ds, err) } // Resync the DaemonSet after MinReadySeconds as a last line of defense to guard against clock-skew. @@ -1210,29 +1238,21 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string) return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, false) } - err = dsc.manage(ctx, ds, nodeList, hash) - if err != nil { + err = dsc.updateDaemonSet(ctx, ds, nodeList, hash, dsKey, old) + statusErr := dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true) + switch { + case err != nil && statusErr != nil: + // If there was an error, and we failed to update status, + // log it and return the original error. + klog.ErrorS(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds)) return err + case err != nil: + return err + case statusErr != nil: + return statusErr } - // Process rolling updates if we're ready. 
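Several of the daemon controller hunks above also switch fmt.Errorf verbs from %v to %w when wrapping an underlying error (cleanupHistory, storeDaemonSetStatus). The practical difference is that %w keeps the cause matchable; a quick self-contained illustration, with an invented error value and DaemonSet name:

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict on write")

func main() {
	// %w: the wrapped error still participates in errors.Is / errors.As chains.
	wrapped := fmt.Errorf("error storing status for daemon set %q: %w", "demo-ds", errConflict)
	fmt.Println(errors.Is(wrapped, errConflict)) // true

	// %v: only the text survives, the cause can no longer be matched.
	flattened := fmt.Errorf("error storing status for daemon set %q: %v", "demo-ds", errConflict)
	fmt.Println(errors.Is(flattened, errConflict)) // false
}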
- if dsc.expectations.SatisfiedExpectations(dsKey) { - switch ds.Spec.UpdateStrategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - case apps.RollingUpdateDaemonSetStrategyType: - err = dsc.rollingUpdate(ctx, ds, nodeList, hash) - } - if err != nil { - return err - } - } - - err = dsc.cleanupHistory(ctx, ds, old) - if err != nil { - return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err) - } - - return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true) + return nil } // NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a @@ -1325,14 +1345,17 @@ func getUnscheduledPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods for _, node := range runningNodesList { isNodeRunning[node.Name] = true } + for n, pods := range nodeToDaemonPods { - if !isNodeRunning[n] { - for _, pod := range pods { - if len(pod.Spec.NodeName) == 0 { - results = append(results, pod.Name) - } + if isNodeRunning[n] { + continue + } + for _, pod := range pods { + if len(pod.Spec.NodeName) == 0 { + results = append(results, pod.Name) } } } + return results } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index cb2fac36342b..c8f12613b72e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -303,7 +303,7 @@ var annotationsToSkip = map[string]bool{ // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key // TODO: How to decide which annotations should / should not be copied? 
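The syncDaemonSet rewrite above always attempts the status update and then reconciles two possible failures: the primary reconcile error wins, and the status error is only logged when it would otherwise be swallowed. Pulled out of context, the decision looks like the sketch below; the function name is illustrative, and the real code logs via klog.ErrorS and returns inline.

package daemonsketch

// resolveSyncErrors mirrors the switch in syncDaemonSet: prefer the primary
// update error, log the status error only when both failed, and otherwise
// return whichever error (if any) occurred.
func resolveSyncErrors(updateErr, statusErr error, logStatusErr func(error)) error {
	switch {
	case updateErr != nil && statusErr != nil:
		logStatusErr(statusErr)
		return updateErr
	case updateErr != nil:
		return updateErr
	default:
		return statusErr // nil when both succeeded
	}
}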
// -// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 func skipCopyAnnotation(key string) bool { return annotationsToSkip[key] } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go index dccfea1cd200..86ce18542c85 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -223,15 +223,6 @@ func ReadURL(url string, client *http.Client, header *http.Header) (body []byte, return contents, nil } -// ReadDockerConfigFileFromURL read a docker config file from the given url -func ReadDockerConfigFileFromURL(url string, client *http.Client, header *http.Header) (cfg DockerConfig, err error) { - if contents, err := ReadURL(url, client, header); err == nil { - return ReadDockerConfigFileFromBytes(contents) - } - - return nil, err -} - // ReadDockerConfigFileFromBytes read a docker config file from the given bytes func ReadDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { if err = json.Unmarshal(contents, &cfg); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 8847b83094f6..a10ff26f7921 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -108,21 +108,6 @@ const ( // Allow the usage of options to fine-tune the cpumanager policies. CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions" - // owner: @pohly - // alpha: v1.14 - // beta: v1.16 - // GA: v1.25 - // - // Enables CSI Inline volumes support for pods - CSIInlineVolume featuregate.Feature = "CSIInlineVolume" - - // owner: @davidz627 - // alpha: v1.14 - // beta: v1.17 - // - // Enables the in-tree storage to CSI Plugin migration feature. - CSIMigration featuregate.Feature = "CSIMigration" - // owner: @leakingtapan // alpha: v1.14 // beta: v1.17 @@ -223,13 +208,6 @@ const ( // Enables support for time zones in CronJobs. CronJobTimeZone featuregate.Feature = "CronJobTimeZone" - // owner: @smarterclayton - // alpha: v1.21 - // beta: v1.22 - // GA: v1.25 - // DaemonSets allow workloads to maintain availability during update per node - DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge" - // owner: @gnufied, @verult, @bertinatto // alpha: v1.22 // beta: v1.23 @@ -291,14 +269,6 @@ const ( // Enable Terminating condition in Endpoint Slices. EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition" - // owner: @verb - // alpha: v1.16 - // beta: v1.23 - // GA: v1.25 - // - // Allows running an ephemeral container in pod namespaces to troubleshoot a running pod. 
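The long run of deletions above and below removes feature gate constants that have graduated to GA and been locked to their default. For context, call sites normally guard behavior as in the sketch below (illustrative, not from the patch); once a gate is GA with LockToDefault the check always returns true, so both the check and the constant can be dropped.

package featuresketch

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// cronJobTimeZoneEnabled shows the usual call-site pattern for consulting a
// feature gate; CronJobTimeZone is used here only because it still exists in
// this tree.
func cronJobTimeZoneEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.CronJobTimeZone)
}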
- EphemeralContainers featuregate.Feature = "EphemeralContainers" - // owner: @harche // kep: http://kep.k8s.io/3386 // alpha: v1.25 @@ -315,25 +285,6 @@ const ( // Lock to default and remove after v1.22 based on user feedback that should be reflected in KEP #1972 update ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout" - // owner: @gnufied - // alpha: v1.14 - // beta: v1.16 - // GA: 1.24 - // Ability to expand CSI volumes - ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes" - - // owner: @mlmhl @gnufied - // beta: v1.15 - // GA: 1.24 - // Ability to expand persistent volumes' file system without unmounting volumes. - ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes" - - // owner: @gnufied - // beta: v1.11 - // GA: 1.24 - // Ability to Expand persistent volumes - ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes" - // owner: @gjkim42 // kep: https://kep.k8s.io/2595 // alpha: v1.22 @@ -391,14 +342,6 @@ const ( // deletion ordering. HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy" - // owner: @ravig - // alpha: v1.23 - // beta: v1.24 - // GA: v1.25 - // IdentifyPodOS allows user to specify OS on which they'd like the Pod run. The user should still set the nodeSelector - // with appropriate `kubernetes.io/os` label for scheduler to identify appropriate node for the pod to run. - IdentifyPodOS featuregate.Feature = "IdentifyPodOS" - // owner: @leakingtapan // alpha: v1.21 // @@ -528,24 +471,19 @@ const ( // owner: @zshihang // kep: https://kep.k8s.io/2800 // beta: v1.24 + // ga: v1.26 // // Stop auto-generation of secret-based service account tokens. LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration" // owner: @zshihang // kep: http://kep.k8s.io/2800 - // alpha: v1.25 + // alpha: v1.26 + // beta: v1.27 // // Enables tracking of secret-based service account tokens usage. LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking" - // owner: @jinxu - // beta: v1.10 - // stable: v1.25 - // - // Support local ephemeral storage types for local storage capacity isolation feature. - LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation" - // owner: @RobertKrawitz // alpha: v1.15 // @@ -617,15 +555,6 @@ const ( // Enables the MultiCIDR Range allocator. MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" - // owner: @rikatz - // kep: https://kep.k8s.io/2079 - // alpha: v1.21 - // beta: v1.22 - // ga: v1.25 - // - // Enables the endPort field in NetworkPolicy to enable a Port Range behavior in Network Policies. - NetworkPolicyEndPort featuregate.Feature = "NetworkPolicyEndPort" - // owner: @rikatz // kep: https://kep.k8s.io/2943 // alpha: v1.24 @@ -796,14 +725,6 @@ const ( // Enables policies controlling deletion of PVCs created by a StatefulSet. 
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC" - // owner: @ravig - // kep: https://kep.k8s.io/2607 - // alpha: v1.22 - // beta: v1.23 - // GA: v1.25 - // StatefulSetMinReadySeconds allows minReadySeconds to be respected by StatefulSet controller - StatefulSetMinReadySeconds featuregate.Feature = "StatefulSetMinReadySeconds" - // owner: @psch // alpha: v1.26 // @@ -939,15 +860,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta}, - CSIInlineVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - - CSIMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - CSIMigrationAWS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26 - CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA}, // remove in 1.28 + CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 @@ -971,8 +888,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CronJobTimeZone: {Default: true, PreRelease: featuregate.Beta}, - DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 DevicePlugins: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26 @@ -989,18 +904,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, - EphemeralContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - EventedPLEG: {Default: false, PreRelease: featuregate.Alpha}, ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update - ExpandCSIVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26 - - ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26 - - ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26 - ExpandedDNSConfig: {Default: true, PreRelease: featuregate.Beta}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta}, @@ -1015,8 +922,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS HonorPVReclaimPolicy: {Default: false, PreRelease: featuregate.Alpha}, - IdentifyPodOS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - InTreePluginAWSUnregister: {Default: false, PreRelease: featuregate.Alpha}, InTreePluginAzureDiskUnregister: {Default: false, PreRelease: featuregate.Alpha}, @@ -1053,11 +958,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS KubeletTracing: {Default: false, PreRelease: featuregate.Alpha}, - LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA}, + LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - LegacyServiceAccountTokenTracking: {Default: false, 
PreRelease: featuregate.Alpha}, - - LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 + LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.Beta}, LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha}, @@ -1071,7 +974,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MemoryQoS: {Default: false, PreRelease: featuregate.Alpha}, - MinDomainsInPodTopologySpread: {Default: false, PreRelease: featuregate.Beta}, + MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, MinimizeIPTablesRestore: {Default: false, PreRelease: featuregate.Alpha}, @@ -1079,8 +982,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, - NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha}, NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.Beta}, @@ -1127,8 +1028,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha}, - StatefulSetMinReadySeconds: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 - StatefulSetStartOrdinal: {Default: false, PreRelease: featuregate.Alpha}, TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta}, @@ -1168,7 +1067,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, + genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go index 04432b9fc69f..e0a30fe26dbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -450,6 +450,7 @@ type KubeletConfiguration struct { // registerNode enables automatic registration with the apiserver. // +optional RegisterNode bool + // Tracing specifies the versioned configuration for OpenTelemetry tracing clients. // See https://kep.k8s.io/2832 for more details. // +featureGate=KubeletTracing @@ -465,6 +466,16 @@ type KubeletConfiguration struct { // disabled. Once disabled, user should not set request/limit for container's ephemeral storage, or sizeLimit for emptyDir. // +optional LocalStorageCapacityIsolation bool + + // ContainerRuntimeEndpoint is the endpoint of container runtime. + // unix domain sockets supported on Linux while npipes and tcp endpoints are supported for windows. + // Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime' + ContainerRuntimeEndpoint string + + // ImageServiceEndpoint is the endpoint of container image service. 
+ // If not specified the default value is ContainerRuntimeEndpoint + // +optional + ImageServiceEndpoint string } // KubeletAuthorizationMode denotes the authorization mode for the kubelet diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go index 4b9397b734fe..018368d4e513 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go @@ -264,4 +264,7 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura if obj.LocalStorageCapacityIsolation == nil { obj.LocalStorageCapacityIsolation = utilpointer.BoolPtr(true) } + if obj.ContainerRuntimeEndpoint == "" { + obj.ContainerRuntimeEndpoint = "unix:///run/containerd/containerd.sock" + } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go index 3f289820445b..43f08cc63c2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -512,6 +512,8 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in if err := v1.Convert_Pointer_bool_To_bool(&in.LocalStorageCapacityIsolation, &out.LocalStorageCapacityIsolation, s); err != nil { return err } + out.ContainerRuntimeEndpoint = in.ContainerRuntimeEndpoint + out.ImageServiceEndpoint = in.ImageServiceEndpoint return nil } @@ -691,6 +693,8 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in if err := v1.Convert_bool_To_Pointer_bool(&in.LocalStorageCapacityIsolation, &out.LocalStorageCapacityIsolation, s); err != nil { return err } + out.ContainerRuntimeEndpoint = in.ContainerRuntimeEndpoint + out.ImageServiceEndpoint = in.ImageServiceEndpoint return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go index dc1c2b7c007b..aa4c2c5fcfa7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go @@ -257,5 +257,9 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur allErrors = append(allErrors, fmt.Errorf("invalid configuration: memoryThrottlingFactor %v must be greater than 0 and less than or equal to 1.0", *kc.MemoryThrottlingFactor)) } + if kc.ContainerRuntimeEndpoint == "" { + allErrors = append(allErrors, fmt.Errorf("invalid configuration: the containerRuntimeEndpoint was not specified or empty")) + } + return utilerrors.NewAggregate(allErrors) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go index 948aa6c91008..20774a8cc11a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go @@ -88,8 +88,6 @@ func (p 
*v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req return nil, fmt.Errorf("Pod Resources API GetAllocatableResources disabled") } - metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc() - return &v1.AllocatableResourcesResponse{ Devices: p.devicesProvider.GetAllocatableDevices(), CpuIds: p.cpusProvider.GetAllocatableCPUs(), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go index 370e374df55a..26b5c60eb879 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -39,9 +39,7 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/google/cadvisor/manager" "github.com/google/cadvisor/utils/sysfs" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" - kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/utils/pointer" ) @@ -94,11 +92,6 @@ func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots [ cadvisormetrics.OOMMetrics: struct{}{}, } - // Only add the Accelerator metrics if the feature is inactive - if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableAcceleratorUsageMetrics) { - includedMetrics[cadvisormetrics.AcceleratorUsageMetrics] = struct{}{} - } - if usingLegacyStats || localStorageCapacityIsolation { includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go index 0db0f1dc3f41..2caea4c24f38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -19,6 +19,7 @@ package bootstrap import ( "context" "crypto" + "crypto/rsa" "crypto/sha512" "crypto/x509" "crypto/x509/pkix" @@ -329,9 +330,11 @@ func requestNodeCertificate(ctx context.Context, client clientset.Interface, pri usages := []certificatesv1.KeyUsage{ certificatesv1.UsageDigitalSignature, - certificatesv1.UsageKeyEncipherment, certificatesv1.UsageClientAuth, } + if _, ok := privateKey.(*rsa.PrivateKey); ok { + usages = append(usages, certificatesv1.UsageKeyEncipherment) + } // The Signer interface contains the Public() method to get the public key. signer, ok := privateKey.(crypto.Signer) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go index d27777554473..8293894acea5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go @@ -105,23 +105,10 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg } m, err := certificate.NewManager(&certificate.Config{ - ClientsetFn: clientsetFn, - GetTemplate: getTemplate, - SignerName: certificates.KubeletServingSignerName, - Usages: []certificates.KeyUsage{ - // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // - // Digital signature allows the certificate to be used to verify - // digital signatures used during TLS negotiation. 
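The bootstrap.go hunk above stops requesting the keyEncipherment usage unconditionally: that usage only makes sense for RSA keys (ECDSA keys sign but do not encipher keys), so it is added back only when the private key is an *rsa.PrivateKey. Restated in isolation as a sketch, with an illustrative helper name:

package csrsketch

import (
	"crypto"
	"crypto/rsa"

	certificatesv1 "k8s.io/api/certificates/v1"
)

// clientCSRUsages returns the key usages to request for a kubelet client
// certificate: digital signature and client auth always, key encipherment
// only when the key type can actually perform it.
func clientCSRUsages(privateKey crypto.PrivateKey) []certificatesv1.KeyUsage {
	usages := []certificatesv1.KeyUsage{
		certificatesv1.UsageDigitalSignature,
		certificatesv1.UsageClientAuth,
	}
	if _, ok := privateKey.(*rsa.PrivateKey); ok {
		usages = append(usages, certificatesv1.UsageKeyEncipherment)
	}
	return usages
}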
- certificates.UsageDigitalSignature, - // KeyEncipherment allows the cert/key pair to be used to encrypt - // keys, including the symmetric keys negotiated during TLS setup - // and used for data transfer. - certificates.UsageKeyEncipherment, - // ServerAuth allows the cert to be used by a TLS server to - // authenticate itself to a TLS client. - certificates.UsageServerAuth, - }, + ClientsetFn: clientsetFn, + GetTemplate: getTemplate, + SignerName: certificates.KubeletServingSignerName, + GetUsages: certificate.DefaultKubeletServingGetUsages, CertificateStore: certificateStore, CertificateRotation: certificateRotationAge, CertificateRenewFailure: certificateRenewFailure, @@ -230,22 +217,7 @@ func NewKubeletClientCertificateManager( }, }, SignerName: certificates.KubeAPIServerClientKubeletSignerName, - Usages: []certificates.KeyUsage{ - // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // - // DigitalSignature allows the certificate to be used to verify - // digital signatures including signatures used during TLS - // negotiation. - certificates.UsageDigitalSignature, - // KeyEncipherment allows the cert/key pair to be used to encrypt - // keys, including the symmetric keys negotiated during TLS setup - // and used for data transfer.. - certificates.UsageKeyEncipherment, - // ClientAuth allows the cert to be used by a TLS client to - // authenticate itself to the TLS server. - certificates.UsageClientAuth, - }, - + GetUsages: certificate.DefaultKubeletClientGetUsages, // For backwards compatibility, the kubelet supports the ability to // provide a higher privileged certificate as initial data that will // then be rotated immediately. This code path is used by kubeadm on diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index 4db702a100bd..be7481f433da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -306,8 +306,8 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error { return nil } -// getCpuWeight converts from the range [2, 262144] to [1, 10000] -func getCpuWeight(cpuShares *uint64) uint64 { +// getCPUWeight converts from the range [2, 262144] to [1, 10000] +func getCPUWeight(cpuShares *uint64) uint64 { if cpuShares == nil { return 0 } @@ -359,18 +359,18 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont if resourceConfig.Memory != nil { resources.Memory = *resourceConfig.Memory } - if resourceConfig.CpuShares != nil { + if resourceConfig.CPUShares != nil { if libcontainercgroups.IsCgroup2UnifiedMode() { - resources.CpuWeight = getCpuWeight(resourceConfig.CpuShares) + resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares) } else { - resources.CpuShares = *resourceConfig.CpuShares + resources.CpuShares = *resourceConfig.CPUShares } } - if resourceConfig.CpuQuota != nil { - resources.CpuQuota = *resourceConfig.CpuQuota + if resourceConfig.CPUQuota != nil { + resources.CpuQuota = *resourceConfig.CPUQuota } - if resourceConfig.CpuPeriod != nil { - resources.CpuPeriod = *resourceConfig.CpuPeriod + if resourceConfig.CPUPeriod != nil { + resources.CpuPeriod = *resourceConfig.CPUPeriod } if resourceConfig.PidsLimit != nil { resources.PidsLimit = *resourceConfig.PidsLimit @@ -530,7 +530,7 @@ func (m *cgroupManagerImpl) ReduceCPULimits(cgroupName CgroupName) error { // 
Set lowest possible CpuShares value for the cgroup minimumCPUShares := uint64(MinShares) resources := &ResourceConfig{ - CpuShares: &minimumCPUShares, + CPUShares: &minimumCPUShares, } containerConfig := &CgroupConfig{ Name: cgroupName, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index 82d4640cb448..cb3a265073a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -199,7 +199,7 @@ func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) { case v1.ResourceMemory: q, err := parsePercentage(v) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse percentage %q for %q resource: %w", v, k, err) } reservations[v1.ResourceName(k)] = q default: diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index 332e99e72ee6..96b84aac717a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -972,16 +972,24 @@ func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.Conta return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetAllocatableDevices()) } +func int64Slice(in []int) []int64 { + out := make([]int64, len(in)) + for i := range in { + out[i] = int64(in[i]) + } + return out +} + func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 { if cm.cpuManager != nil { - return cm.cpuManager.GetExclusiveCPUs(podUID, containerName).ToSliceNoSortInt64() + return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).UnsortedList()) } return []int64{} } func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 { if cm.cpuManager != nil { - return cm.cpuManager.GetAllocatableCPUs().ToSliceNoSortInt64() + return int64Slice(cm.cpuManager.GetAllocatableCPUs().UnsortedList()) } return []int64{} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go index e8d02ca4ab87..cdf5e5197c8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -128,7 +128,7 @@ func (n *numaFirst) takeFullSecondLevel() { // If NUMA nodes are higher in the memory hierarchy than sockets, then just // sort the NUMA nodes directly, and return them. func (n *numaFirst) sortAvailableNUMANodes() []int { - numas := n.acc.details.NUMANodes().ToSliceNoSort() + numas := n.acc.details.NUMANodes().UnsortedList() n.acc.sort(numas, n.acc.details.CPUsInNUMANodes) return numas } @@ -139,7 +139,7 @@ func (n *numaFirst) sortAvailableNUMANodes() []int { func (n *numaFirst) sortAvailableSockets() []int { var result []int for _, numa := range n.sortAvailableNUMANodes() { - sockets := n.acc.details.SocketsInNUMANodes(numa).ToSliceNoSort() + sockets := n.acc.details.SocketsInNUMANodes(numa).UnsortedList() n.acc.sort(sockets, n.acc.details.CPUsInSockets) result = append(result, sockets...) 
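Stepping back to the getCPUWeight rename in cgroup_manager_linux.go above: the function maps cgroup v1 cpu.shares, valid in [2, 262144], onto cgroup v2 cpu.weight, valid in [1, 10000]. The hunk only shows the renamed signature, so the body below is an assumption, a sketch of the conventional linear mapping used by cgroup v2 aware runtimes rather than a quote of the actual code.

package cgroupsketch

// cpuSharesToWeight linearly interpolates a cgroup v1 cpu.shares value in
// [2, 262144] to a cgroup v2 cpu.weight value in [1, 10000]:
// shares of 2 map to weight 1, shares of 262144 map to weight 10000.
func cpuSharesToWeight(cpuShares *uint64) uint64 {
	if cpuShares == nil {
		return 0
	}
	shares := *cpuShares
	// Clamp out-of-range inputs to the valid cpu.shares interval.
	if shares < 2 {
		shares = 2
	}
	if shares > 262144 {
		shares = 262144
	}
	return 1 + ((shares-2)*9999)/262142
}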
} @@ -151,7 +151,7 @@ func (n *numaFirst) sortAvailableSockets() []int { func (n *numaFirst) sortAvailableCores() []int { var result []int for _, socket := range n.acc.sortAvailableSockets() { - cores := n.acc.details.CoresInSockets(socket).ToSliceNoSort() + cores := n.acc.details.CoresInSockets(socket).UnsortedList() n.acc.sort(cores, n.acc.details.CPUsInCores) result = append(result, cores...) } @@ -176,7 +176,7 @@ func (s *socketsFirst) takeFullSecondLevel() { func (s *socketsFirst) sortAvailableNUMANodes() []int { var result []int for _, socket := range s.sortAvailableSockets() { - numas := s.acc.details.NUMANodesInSockets(socket).ToSliceNoSort() + numas := s.acc.details.NUMANodesInSockets(socket).UnsortedList() s.acc.sort(numas, s.acc.details.CPUsInNUMANodes) result = append(result, numas...) } @@ -186,7 +186,7 @@ func (s *socketsFirst) sortAvailableNUMANodes() []int { // If sockets are higher in the memory hierarchy than NUMA nodes, then just // sort the sockets directly, and return them. func (s *socketsFirst) sortAvailableSockets() []int { - sockets := s.acc.details.Sockets().ToSliceNoSort() + sockets := s.acc.details.Sockets().UnsortedList() s.acc.sort(sockets, s.acc.details.CPUsInSockets) return sockets } @@ -196,7 +196,7 @@ func (s *socketsFirst) sortAvailableSockets() []int { func (s *socketsFirst) sortAvailableCores() []int { var result []int for _, numa := range s.acc.sortAvailableNUMANodes() { - cores := s.acc.details.CoresInNUMANodes(numa).ToSliceNoSort() + cores := s.acc.details.CoresInNUMANodes(numa).UnsortedList() s.acc.sort(cores, s.acc.details.CPUsInCores) result = append(result, cores...) } @@ -216,7 +216,7 @@ func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, topo: topo, details: topo.CPUDetails.KeepOnly(availableCPUs), numCPUsNeeded: numCPUs, - result: cpuset.NewCPUSet(), + result: cpuset.New(), } if topo.NumSockets >= topo.NumNUMANodes { @@ -323,7 +323,7 @@ func (a *cpuAccumulator) sortAvailableCores() []int { func (a *cpuAccumulator) sortAvailableCPUs() []int { var result []int for _, core := range a.sortAvailableCores() { - cpus := a.details.CPUsInCores(core).ToSliceNoSort() + cpus := a.details.CPUsInCores(core).UnsortedList() sort.Ints(cpus) result = append(result, cpus...) } @@ -372,7 +372,7 @@ func (a *cpuAccumulator) takeFullCores() { func (a *cpuAccumulator) takeRemainingCPUs() { for _, cpu := range a.sortAvailableCPUs() { klog.V(4).InfoS("takeRemainingCPUs: claiming CPU", "cpu", cpu) - a.take(cpuset.NewCPUSet(cpu)) + a.take(cpuset.New(cpu)) if a.isSatisfied() { return } @@ -453,7 +453,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C return acc.result, nil } if acc.isFailed() { - return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request") + return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request") } // Algorithm: topology-aware best-fit @@ -485,7 +485,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C return acc.result, nil } - return cpuset.NewCPUSet(), fmt.Errorf("failed to allocate cpus") + return cpuset.New(), fmt.Errorf("failed to allocate cpus") } // takeByTopologyNUMADistributed returns a CPUSet of size 'numCPUs'. 
@@ -565,7 +565,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu return acc.result, nil } if acc.isFailed() { - return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request") + return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request") } // Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'. @@ -763,13 +763,13 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu // If we haven't allocated all of our CPUs at this point, then something // went wrong in our accounting and we should error out. if acc.numCPUsNeeded > 0 { - return cpuset.NewCPUSet(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded) + return cpuset.New(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded) } // Likewise, if we have allocated too many CPUs at this point, then something // went wrong in our accounting and we should error out. if acc.numCPUsNeeded < 0 { - return cpuset.NewCPUSet(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded) + return cpuset.New(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded) } // Otherwise, return the result diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go index 1e35f6a094ed..c5c151a78b3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go @@ -72,5 +72,5 @@ func (p *nonePolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string] // CAN get exclusive access to core(s). // Hence, we return empty set here: no cpus are assignable according to above definition with this policy. func (p *nonePolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet { - return cpuset.NewCPUSet() + return cpuset.New() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 4d7f7c0b0ded..660da92fba43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -217,7 +217,7 @@ func (p *staticPolicy) validateState(s state.State) error { tmpCPUSets = append(tmpCPUSets, cset) } } - totalKnownCPUs = totalKnownCPUs.UnionAll(tmpCPUSets) + totalKnownCPUs = totalKnownCPUs.Union(tmpCPUSets...) if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) { return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"", p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String()) @@ -245,7 +245,7 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c } // If no cpuset exists for cpusToReuse by this pod yet, create one. if _, ok := p.cpusToReuse[string(pod.UID)]; !ok { - p.cpusToReuse[string(pod.UID)] = cpuset.NewCPUSet() + p.cpusToReuse[string(pod.UID)] = cpuset.New() } // Check if the container is an init container. // If so, add its cpuset to the cpuset of reusable CPUs for any new allocations. 
@@ -316,7 +316,7 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai // getAssignedCPUsOfSiblings returns assigned cpus of given container's siblings(all containers other than the given container) in the given pod `podUID`. func getAssignedCPUsOfSiblings(s state.State, podUID string, containerName string) cpuset.CPUSet { assignments := s.GetCPUAssignments() - cset := cpuset.NewCPUSet() + cset := cpuset.New() for name, cpus := range assignments[podUID] { if containerName == name { continue @@ -344,7 +344,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs) // If there are aligned CPUs in numaAffinity, attempt to take those first. - result := cpuset.NewCPUSet() + result := cpuset.New() if numaAffinity != nil { alignedCPUs := p.getAlignedCPUs(numaAffinity, allocatableCPUs) @@ -355,7 +355,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit alignedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc) if err != nil { - return cpuset.NewCPUSet(), err + return cpuset.New(), err } result = result.Union(alignedCPUs) @@ -364,7 +364,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit // Get any remaining CPUs from what's leftover after attempting to grab aligned ones. remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result), numCPUs-result.Size()) if err != nil { - return cpuset.NewCPUSet(), err + return cpuset.New(), err } result = result.Union(remainingCPUs) @@ -486,7 +486,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin return nil } - assignedCPUs := cpuset.NewCPUSet() + assignedCPUs := cpuset.New() for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { requestedByContainer := p.guaranteedCPUs(pod, &container) // Short circuit to regenerate the same hints if there are already @@ -544,7 +544,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu // Iterate through all combinations of numa nodes bitmask and build hints from them. hints := []topologymanager.TopologyHint{} - bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().ToSlice(), func(mask bitmask.BitMask) { + bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().List(), func(mask bitmask.BitMask) { // First, update minAffinitySize for the current request size. cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size() if cpusInMask >= request && mask.Count() < minAffinitySize { @@ -554,7 +554,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu // Then check to see if we have enough CPUs available on the current // numa node bitmask to satisfy the CPU request. numMatching := 0 - for _, c := range reusableCPUs.ToSlice() { + for _, c := range reusableCPUs.List() { // Disregard this mask if its NUMANode isn't part of it. if !mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) { return @@ -564,7 +564,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu // Finally, check to see if enough available CPUs remain on the current // NUMA node combination to satisfy the CPU request. 
- for _, c := range availableCPUs.ToSlice() { + for _, c := range availableCPUs.List() { if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) { numMatching++ } @@ -616,14 +616,14 @@ func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, mi // getAlignedCPUs return set of aligned CPUs based on numa affinity mask and configured policy options. func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableCPUs cpuset.CPUSet) cpuset.CPUSet { - alignedCPUs := cpuset.NewCPUSet() + alignedCPUs := cpuset.New() numaBits := numaAffinity.GetBits() // If align-by-socket policy option is enabled, NUMA based hint is expanded to // socket aligned hint. It will ensure that first socket aligned available CPUs are // allocated before we try to find CPUs across socket to satisfy allocation request. if p.options.AlignBySocket { - socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).ToSliceNoSort() + socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).UnsortedList() for _, socketID := range socketBits { alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInSockets(socketID))) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go index c9aacd190519..9297f23757dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go @@ -18,7 +18,7 @@ package state import ( "fmt" - "path" + "path/filepath" "sync" "k8s.io/klog/v2" @@ -56,7 +56,7 @@ func NewCheckpointState(stateDir, checkpointName, policyName string, initialCont if err := stateCheckpoint.restoreState(); err != nil { //nolint:staticcheck // ST1005 user-facing error message return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet", - err, path.Join(stateDir, checkpointName)) + err, filepath.Join(stateDir, checkpointName)) } return stateCheckpoint, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go index f51423f18674..8f3a10d95b2d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go @@ -36,7 +36,7 @@ func NewMemoryState() State { klog.InfoS("Initialized new in-memory state store") return &stateMemory{ assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), + defaultCPUSet: cpuset.New(), } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index c9b0849261ff..7defe3400d23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -83,138 +83,138 @@ func (d CPUDetails) KeepOnly(cpus cpuset.CPUSet) CPUDetails { // NUMANodes returns all of the NUMANode IDs associated with the CPUs in this // CPUDetails. 
func (d CPUDetails) NUMANodes() cpuset.CPUSet { - b := cpuset.NewBuilder() + var numaNodeIDs []int for _, info := range d { - b.Add(info.NUMANodeID) + numaNodeIDs = append(numaNodeIDs, info.NUMANodeID) } - return b.Result() + return cpuset.New(numaNodeIDs...) } // NUMANodesInSockets returns all of the logical NUMANode IDs associated with // the given socket IDs in this CPUDetails. func (d CPUDetails) NUMANodesInSockets(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var numaNodeIDs []int for _, id := range ids { for _, info := range d { if info.SocketID == id { - b.Add(info.NUMANodeID) + numaNodeIDs = append(numaNodeIDs, info.NUMANodeID) } } } - return b.Result() + return cpuset.New(numaNodeIDs...) } // Sockets returns all of the socket IDs associated with the CPUs in this // CPUDetails. func (d CPUDetails) Sockets() cpuset.CPUSet { - b := cpuset.NewBuilder() + var socketIDs []int for _, info := range d { - b.Add(info.SocketID) + socketIDs = append(socketIDs, info.SocketID) } - return b.Result() + return cpuset.New(socketIDs...) } // CPUsInSockets returns all of the logical CPU IDs associated with the given // socket IDs in this CPUDetails. func (d CPUDetails) CPUsInSockets(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var cpuIDs []int for _, id := range ids { for cpu, info := range d { if info.SocketID == id { - b.Add(cpu) + cpuIDs = append(cpuIDs, cpu) } } } - return b.Result() + return cpuset.New(cpuIDs...) } // SocketsInNUMANodes returns all of the logical Socket IDs associated with the // given NUMANode IDs in this CPUDetails. func (d CPUDetails) SocketsInNUMANodes(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var socketIDs []int for _, id := range ids { for _, info := range d { if info.NUMANodeID == id { - b.Add(info.SocketID) + socketIDs = append(socketIDs, info.SocketID) } } } - return b.Result() + return cpuset.New(socketIDs...) } // Cores returns all of the core IDs associated with the CPUs in this // CPUDetails. func (d CPUDetails) Cores() cpuset.CPUSet { - b := cpuset.NewBuilder() + var coreIDs []int for _, info := range d { - b.Add(info.CoreID) + coreIDs = append(coreIDs, info.CoreID) } - return b.Result() + return cpuset.New(coreIDs...) } // CoresInNUMANodes returns all of the core IDs associated with the given // NUMANode IDs in this CPUDetails. func (d CPUDetails) CoresInNUMANodes(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var coreIDs []int for _, id := range ids { for _, info := range d { if info.NUMANodeID == id { - b.Add(info.CoreID) + coreIDs = append(coreIDs, info.CoreID) } } } - return b.Result() + return cpuset.New(coreIDs...) } // CoresInSockets returns all of the core IDs associated with the given socket // IDs in this CPUDetails. func (d CPUDetails) CoresInSockets(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var coreIDs []int for _, id := range ids { for _, info := range d { if info.SocketID == id { - b.Add(info.CoreID) + coreIDs = append(coreIDs, info.CoreID) } } } - return b.Result() + return cpuset.New(coreIDs...) } // CPUs returns all of the logical CPU IDs in this CPUDetails. func (d CPUDetails) CPUs() cpuset.CPUSet { - b := cpuset.NewBuilder() + var cpuIDs []int for cpuID := range d { - b.Add(cpuID) + cpuIDs = append(cpuIDs, cpuID) } - return b.Result() + return cpuset.New(cpuIDs...) } // CPUsInNUMANodes returns all of the logical CPU IDs associated with the given // NUMANode IDs in this CPUDetails. 
func (d CPUDetails) CPUsInNUMANodes(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var cpuIDs []int for _, id := range ids { for cpu, info := range d { if info.NUMANodeID == id { - b.Add(cpu) + cpuIDs = append(cpuIDs, cpu) } } } - return b.Result() + return cpuset.New(cpuIDs...) } // CPUsInCores returns all of the logical CPU IDs associated with the given // core IDs in this CPUDetails. func (d CPUDetails) CPUsInCores(ids ...int) cpuset.CPUSet { - b := cpuset.NewBuilder() + var cpuIDs []int for _, id := range ids { for cpu, info := range d { if info.CoreID == id { - b.Add(cpu) + cpuIDs = append(cpuIDs, cpu) } } } - return b.Result() + return cpuset.New(cpuIDs...) } // Discover returns CPUTopology based on cadvisor node info @@ -261,7 +261,7 @@ func getUniqueCoreID(threads []int) (coreID int, err error) { return 0, fmt.Errorf("no cpus provided") } - if len(threads) != cpuset.NewCPUSet(threads...).Size() { + if len(threads) != cpuset.New(threads...).Size() { return 0, fmt.Errorf("cpus provided are not unique") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go index 78f68c43a334..4c910d334a23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go @@ -14,75 +14,48 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cpuset represents a collection of CPUs in a 'set' data structure. +// +// It can be used to represent core IDs, hyper thread siblings, CPU nodes, or processor IDs. +// +// The only special thing about this package is that +// methods are provided to convert back and forth from Linux 'list' syntax. +// See http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS for details. +// +// Future work can migrate this to use a 'set' library, and relax the dubious 'immutable' property. package cpuset import ( "bytes" "fmt" - "os" "reflect" "sort" "strconv" "strings" - - "k8s.io/klog/v2" ) -// Builder is a mutable builder for CPUSet. Functions that mutate instances -// of this type are not thread-safe. -type Builder struct { - result CPUSet - done bool -} - -// NewBuilder returns a mutable CPUSet builder. -func NewBuilder() *Builder { - return &Builder{ - result: CPUSet{ - elems: map[int]struct{}{}, - }, - } -} - -// Add adds the supplied elements to the result. Calling Add after calling -// Result has no effect. -func (b *Builder) Add(elems ...int) { - if b.done { - return - } - for _, elem := range elems { - b.result.elems[elem] = struct{}{} - } -} - -// Result returns the result CPUSet containing all elements that were -// previously added to this builder. Subsequent calls to Add have no effect. -func (b *Builder) Result() CPUSet { - b.done = true - return b.result -} - // CPUSet is a thread-safe, immutable set-like data structure for CPU IDs. type CPUSet struct { elems map[int]struct{} } -// NewCPUSet returns a new CPUSet containing the supplied elements. -func NewCPUSet(cpus ...int) CPUSet { - b := NewBuilder() +// New returns a new CPUSet containing the supplied elements. +func New(cpus ...int) CPUSet { + s := CPUSet{ + elems: map[int]struct{}{}, + } for _, c := range cpus { - b.Add(c) + s.add(c) } - return b.Result() + return s } -// NewCPUSetInt64 returns a new CPUSet containing the supplied elements, as slice of int64. 
-func NewCPUSetInt64(cpus ...int64) CPUSet { - b := NewBuilder() - for _, c := range cpus { - b.Add(int(c)) +// add adds the supplied elements to the CPUSet. +// It is intended for internal use only, since it mutates the CPUSet. +func (s CPUSet) add(elems ...int) { + for _, elem := range elems { + s.elems[elem] = struct{}{} } - return b.Result() } // Size returns the number of elements in this set. @@ -107,29 +80,16 @@ func (s CPUSet) Equals(s2 CPUSet) bool { return reflect.DeepEqual(s.elems, s2.elems) } -// Filter returns a new CPU set that contains all of the elements from this +// filter returns a new CPU set that contains all of the elements from this // set that match the supplied predicate, without mutating the source set. -func (s CPUSet) Filter(predicate func(int) bool) CPUSet { - b := NewBuilder() +func (s CPUSet) filter(predicate func(int) bool) CPUSet { + r := New() for cpu := range s.elems { if predicate(cpu) { - b.Add(cpu) + r.add(cpu) } } - return b.Result() -} - -// FilterNot returns a new CPU set that contains all of the elements from this -// set that do not match the supplied predicate, without mutating the source -// set. -func (s CPUSet) FilterNot(predicate func(int) bool) CPUSet { - b := NewBuilder() - for cpu := range s.elems { - if !predicate(cpu) { - b.Add(cpu) - } - } - return b.Result() + return r } // IsSubsetOf returns true if the supplied set contains all the elements @@ -145,63 +105,46 @@ func (s CPUSet) IsSubsetOf(s2 CPUSet) bool { } // Union returns a new CPU set that contains all of the elements from this -// set and all of the elements from the supplied set, without mutating -// either source set. -func (s CPUSet) Union(s2 CPUSet) CPUSet { - b := NewBuilder() - for cpu := range s.elems { - b.Add(cpu) - } - for cpu := range s2.elems { - b.Add(cpu) - } - return b.Result() -} - -// UnionAll returns a new CPU set that contains all of the elements from this // set and all of the elements from the supplied sets, without mutating // either source set. -func (s CPUSet) UnionAll(s2 []CPUSet) CPUSet { - b := NewBuilder() +func (s CPUSet) Union(s2 ...CPUSet) CPUSet { + r := New() for cpu := range s.elems { - b.Add(cpu) + r.add(cpu) } for _, cs := range s2 { for cpu := range cs.elems { - b.Add(cpu) + r.add(cpu) } } - return b.Result() + return r } // Intersection returns a new CPU set that contains all of the elements // that are present in both this set and the supplied set, without mutating // either source set. func (s CPUSet) Intersection(s2 CPUSet) CPUSet { - return s.Filter(func(cpu int) bool { return s2.Contains(cpu) }) + return s.filter(func(cpu int) bool { return s2.Contains(cpu) }) } // Difference returns a new CPU set that contains all of the elements that // are present in this set and not the supplied set, without mutating either // source set. func (s CPUSet) Difference(s2 CPUSet) CPUSet { - return s.FilterNot(func(cpu int) bool { return s2.Contains(cpu) }) + return s.filter(func(cpu int) bool { return !s2.Contains(cpu) }) } -// ToSlice returns a slice of integers that contains all elements from -// this set. -func (s CPUSet) ToSlice() []int { - result := make([]int, 0, len(s.elems)) - for cpu := range s.elems { - result = append(result, cpu) - } +// List returns a slice of integers that contains all elements from +// this set. The list is sorted. 
+func (s CPUSet) List() []int { + result := s.UnsortedList() sort.Ints(result) return result } -// ToSliceNoSort returns a slice of integers that contains all elements from +// UnsortedList returns a slice of integers that contains all elements from // this set. -func (s CPUSet) ToSliceNoSort() []int { +func (s CPUSet) UnsortedList() []int { result := make([]int, 0, len(s.elems)) for cpu := range s.elems { result = append(result, cpu) @@ -209,27 +152,6 @@ func (s CPUSet) ToSliceNoSort() []int { return result } -// ToSliceInt64 returns an ordered slice of int64 that contains all elements from -// this set -func (s CPUSet) ToSliceInt64() []int64 { - result := make([]int64, 0, len(s.elems)) - for cpu := range s.elems { - result = append(result, int64(cpu)) - } - sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) - return result -} - -// ToSliceNoSortInt64 returns a slice of int64 that contains all elements from -// this set. -func (s CPUSet) ToSliceNoSortInt64() []int64 { - result := make([]int64, 0, len(s.elems)) - for cpu := range s.elems { - result = append(result, int64(cpu)) - } - return result -} - // String returns a new string representation of the elements in this CPU set // in canonical linux CPU list format. // @@ -239,7 +161,7 @@ func (s CPUSet) String() string { return "" } - elems := s.ToSlice() + elems := s.List() type rng struct { start int @@ -273,29 +195,17 @@ func (s CPUSet) String() string { return strings.TrimRight(result.String(), ",") } -// MustParse CPUSet constructs a new CPU set from a Linux CPU list formatted -// string. Unlike Parse, it does not return an error but rather panics if the -// input cannot be used to construct a CPU set. -func MustParse(s string) CPUSet { - res, err := Parse(s) - if err != nil { - klog.ErrorS(err, "Failed to parse input as CPUSet", "input", s) - os.Exit(1) - } - return res -} - // Parse CPUSet constructs a new CPU set from a Linux CPU list formatted string. // // See: http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS func Parse(s string) (CPUSet, error) { - b := NewBuilder() - // Handle empty string. if s == "" { - return b.Result(), nil + return New(), nil } + result := New() + // Split CPU list string: // "0-5,34,46-48" => ["0-5", "34", "46-48"] ranges := strings.Split(s, ",") @@ -306,39 +216,39 @@ func Parse(s string) (CPUSet, error) { // Handle ranges that consist of only one element like "34". elem, err := strconv.Atoi(boundaries[0]) if err != nil { - return NewCPUSet(), err + return New(), err } - b.Add(elem) + result.add(elem) } else if len(boundaries) == 2 { // Handle multi-element ranges like "0-5". start, err := strconv.Atoi(boundaries[0]) if err != nil { - return NewCPUSet(), err + return New(), err } end, err := strconv.Atoi(boundaries[1]) if err != nil { - return NewCPUSet(), err + return New(), err } if start > end { - return NewCPUSet(), fmt.Errorf("invalid range %q (%d >= %d)", r, start, end) + return New(), fmt.Errorf("invalid range %q (%d >= %d)", r, start, end) } // start == end is acceptable (1-1 -> 1) // Add all elements to the result. // e.g. "0-5", "46-48" => [0, 1, 2, 3, 4, 5, 46, 47, 48]. for e := start; e <= end; e++ { - b.Add(e) + result.add(e) } } } - return b.Result(), nil + return result, nil } // Clone returns a copy of this CPU set. 
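Parse keeps the same Linux cpulist grammar but now fills the result directly instead of going through a Builder, and the variadic Union absorbs the old UnionAll. A short usage sketch against this API (same caveat about the vendored import path):

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
    )

    func main() {
    	// Linux cpulist syntax: single CPUs and ranges separated by commas.
    	parsed, err := cpuset.Parse("0-5,34,46-48")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(parsed.Size()) // 10
    	fmt.Println(parsed.List()) // sorted: [0 1 2 3 4 5 34 46 47 48]

    	// Union is now variadic, replacing the old UnionAll([]CPUSet).
    	merged := cpuset.New(0, 1).Union(cpuset.New(2), cpuset.New(3, 4))
    	fmt.Println(merged.String()) // "0-4"
    }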
func (s CPUSet) Clone() CPUSet { - b := NewBuilder() + r := New() for elem := range s.elems { - b.Add(elem) + r.add(elem) } - return b.Result() + return r } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go index 95ff4c7c0aaa..1d226f72a853 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go @@ -20,7 +20,7 @@ import ( "context" "net" "os" - "path" + "path/filepath" "sync" "time" @@ -205,7 +205,7 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir stri client := pluginapi.NewRegistrationClient(conn) reqt := &pluginapi.RegisterRequest{ Version: pluginapi.Version, - Endpoint: path.Base(m.socket), + Endpoint: filepath.Base(m.socket), ResourceName: resourceName, Options: &pluginapi.DevicePluginOptions{ PreStartRequired: m.preStartContainerFlag, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/client.go index 49ab5cb1e3ce..03e781f8a7e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/client.go @@ -110,9 +110,9 @@ func (r *draPluginClient) NodePrepareResource( klog.V(4).InfoS( log("calling NodePrepareResource rpc"), "namespace", namespace, - "claim UID", claimUID, - "claim name", claimName, - "resource handle", resourceHandle) + "claimUID", claimUID, + "claimName", claimName, + "resourceHandle", resourceHandle) if r.nodeV1ClientCreator == nil { return nil, errors.New("failed to call NodePrepareResource. 
nodeV1ClientCreator is nil") @@ -144,9 +144,9 @@ func (r *draPluginClient) NodeUnprepareResource( klog.V(4).InfoS( log("calling NodeUnprepareResource rpc"), "namespace", namespace, - "claim UID", claimUID, - "claim name", claimName, - "cdi devices", cdiDevices) + "claimUID", claimUID, + "claimname", claimName, + "cdiDevices", cdiDevices) if r.nodeV1ClientCreator == nil { return nil, errors.New("nodeV1ClientCreate is nil") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go index e0292496fe9a..8cf15d1f1f12 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go @@ -186,22 +186,22 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, // build the result result := &ResourceConfig{} if qosClass == v1.PodQOSGuaranteed { - result.CpuShares = &cpuShares - result.CpuQuota = &cpuQuota - result.CpuPeriod = &cpuPeriod + result.CPUShares = &cpuShares + result.CPUQuota = &cpuQuota + result.CPUPeriod = &cpuPeriod result.Memory = &memoryLimits } else if qosClass == v1.PodQOSBurstable { - result.CpuShares = &cpuShares + result.CPUShares = &cpuShares if cpuLimitsDeclared { - result.CpuQuota = &cpuQuota - result.CpuPeriod = &cpuPeriod + result.CPUQuota = &cpuQuota + result.CPUPeriod = &cpuPeriod } if memoryLimitsDeclared { result.Memory = &memoryLimits } } else { shares := uint64(MinShares) - result.CpuShares = &shares + result.CPUShares = &shares } result.HugePageLimit = hugePageLimits diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go index 286b9fda8193..05e32b751a67 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go @@ -18,7 +18,7 @@ package state import ( "fmt" - "path" + "path/filepath" "sync" "k8s.io/klog/v2" @@ -52,7 +52,7 @@ func NewCheckpointState(stateDir, checkpointName, policyName string) (State, err if err := stateCheckpoint.restoreState(); err != nil { //nolint:staticcheck // ST1005 user-facing error message return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the memory manager checkpoint file %q before restarting Kubelet", - err, path.Join(stateDir, checkpointName)) + err, filepath.Join(stateDir, checkpointName)) } return stateCheckpoint, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go index b6c0457bac5c..74221c67047b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go @@ -155,7 +155,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1. 
Name: cName, ResourceParameters: rp, } - klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CpuShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit) + klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit) if err := cgroupManager.Validate(cgroupConfig.Name); err != nil { return err } @@ -180,7 +180,7 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { if q, exists := rl[v1.ResourceCPU]; exists { // CPU is defined in milli-cores. val := MilliCPUToShares(q.MilliValue()) - rc.CpuShares = &val + rc.CPUShares = &val } if q, exists := rl[pidlimit.PIDs]; exists { val := q.Value() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go index 0ddd44ac2343..b2105fc8a914 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go @@ -98,7 +98,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis // the BestEffort QoS class has a statically configured minShares value if qosClass == v1.PodQOSBestEffort { minShares := uint64(MinShares) - resourceParameters.CpuShares = &minShares + resourceParameters.CPUShares = &minShares } // containerConfig object stores the cgroup specifications @@ -184,11 +184,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass] // make sure best effort is always 2 shares bestEffortCPUShares := uint64(MinShares) - configs[v1.PodQOSBestEffort].ResourceParameters.CpuShares = &bestEffortCPUShares + configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares // set burstable shares based on current observe state burstableCPUShares := MilliCPUToShares(burstablePodCPURequest) - configs[v1.PodQOSBurstable].ResourceParameters.CpuShares = &burstableCPUShares + configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares return nil } @@ -290,7 +290,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou configs[v1.PodQOSBurstable].ResourceParameters.Unified = make(map[string]string) } configs[v1.PodQOSBurstable].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(burstableMin, 10) - klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memory.min", burstableMin) + klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memoryMin", burstableMin) } if guaranteedMin > 0 { @@ -298,7 +298,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou configs[v1.PodQOSGuaranteed].ResourceParameters.Unified = make(map[string]string) } configs[v1.PodQOSGuaranteed].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(guaranteedMin, 10) - klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memory.min", guaranteedMin) + klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memoryMin", guaranteedMin) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go index edf97334781d..f9ddc55ae2cb 100644 
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go @@ -26,11 +26,11 @@ type ResourceConfig struct { // Memory limit (in bytes). Memory *int64 // CPU shares (relative weight vs. other containers). - CpuShares *uint64 + CPUShares *uint64 // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuQuota *int64 + CPUQuota *int64 // CPU quota period. - CpuPeriod *uint64 + CPUPeriod *uint64 // HugePageLimit map from page size (in bytes) to limit (in bytes) HugePageLimit map[int64]int64 // Maximum number of pids diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go index b438e0d25d54..effee19e6e90 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go @@ -28,5 +28,6 @@ const ( DefaultKubeletPluginContainersDirName = "plugin-containers" DefaultKubeletPodResourcesDirName = "pod-resources" KubeletPluginsDirSELinuxLabel = "system_u:object_r:container_file_t:s0" + KubeletContainersSharedSELinuxLabel = "system_u:object_r:container_file_t:s0" DefaultKubeletCheckpointsDirName = "checkpoints" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go index ee005d990383..1deff550fd8c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go @@ -25,9 +25,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" utilfeature "k8s.io/apiserver/pkg/util/feature" tracing "k8s.io/component-base/tracing" "k8s.io/klog/v2" @@ -93,13 +91,11 @@ func (r *remoteImageService) validateServiceConnection(ctx context.Context, conn klog.V(4).InfoS("Validating the CRI v1 API image version") r.imageClient = runtimeapi.NewImageServiceClient(conn) - if _, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}); err == nil { - klog.V(2).InfoS("Validated CRI v1 image API") - - } else if status.Code(err) == codes.Unimplemented { - return fmt.Errorf("CRI v1 image API is not implemented for endpoint %q: %w", endpoint, err) + if _, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}); err != nil { + return fmt.Errorf("validate CRI v1 image API for endpoint %q: %w", endpoint, err) } + klog.V(2).InfoS("Validated CRI v1 image API") return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_runtime.go index cd7b02efb741..7a886bda3954 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_runtime.go @@ -117,13 +117,11 @@ func (r *remoteRuntimeService) validateServiceConnection(ctx context.Context, co klog.V(4).InfoS("Validating the CRI v1 API runtime version") r.runtimeClient = runtimeapi.NewRuntimeServiceClient(conn) - if _, err := r.runtimeClient.Version(ctx, 
&runtimeapi.VersionRequest{}); err == nil { - klog.V(2).InfoS("Validated CRI v1 runtime API") - - } else if status.Code(err) == codes.Unimplemented { - return fmt.Errorf("CRI v1 runtime API is not implemented for endpoint %q: %w", endpoint, err) + if _, err := r.runtimeClient.Version(ctx, &runtimeapi.VersionRequest{}); err != nil { + return fmt.Errorf("validate CRI v1 runtime API for endpoint %q: %w", endpoint, err) } + klog.V(2).InfoS("Validated CRI v1 runtime API") return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 1a03b11ed463..d8eda75da1d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -24,7 +24,6 @@ import ( "net" "net/http" "os" - "path" "path/filepath" sysruntime "runtime" "sort" @@ -33,11 +32,13 @@ import ( "time" "github.com/opencontainers/selinux/go-selinux" + "k8s.io/client-go/informers" cadvisorapi "github.com/google/cadvisor/info/v1" libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" "go.opentelemetry.io/otel/trace" + "k8s.io/mount-utils" "k8s.io/utils/integer" netutils "k8s.io/utils/net" @@ -301,24 +302,20 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku } // PreInitRuntimeService will init runtime service before RunKubelet. -func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration, - kubeDeps *Dependencies, - remoteRuntimeEndpoint string, - remoteImageEndpoint string) error { - // remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified - if remoteRuntimeEndpoint != "" && remoteImageEndpoint == "" { - remoteImageEndpoint = remoteRuntimeEndpoint +func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies) error { + remoteImageEndpoint := kubeCfg.ImageServiceEndpoint + if remoteImageEndpoint == "" && kubeCfg.ContainerRuntimeEndpoint != "" { + remoteImageEndpoint = kubeCfg.ContainerRuntimeEndpoint } - var err error - if kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(remoteRuntimeEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.TracerProvider); err != nil { + if kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(kubeCfg.ContainerRuntimeEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.TracerProvider); err != nil { return err } if kubeDeps.RemoteImageService, err = remote.NewRemoteImageService(remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.TracerProvider); err != nil { return err } - kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(remoteRuntimeEndpoint) + kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(kubeCfg.ContainerRuntimeEndpoint) return nil } @@ -609,7 +606,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // podManager is also responsible for keeping secretManager and configMapManager contents up-to-date. 
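PreInitRuntimeService no longer takes the endpoints as separate arguments; it reads them from the kubelet configuration and reuses the container runtime endpoint when no image service endpoint is set. The defaulting itself reduces to the following (a standalone sketch; the helper name and example sockets are only illustrative):

    package main

    import "fmt"

    // resolveImageEndpoint mirrors the fallback above: an explicit image service
    // endpoint wins, otherwise the container runtime endpoint is reused.
    func resolveImageEndpoint(imageEndpoint, runtimeEndpoint string) string {
    	if imageEndpoint == "" {
    		return runtimeEndpoint
    	}
    	return imageEndpoint
    }

    func main() {
    	fmt.Println(resolveImageEndpoint("", "unix:///run/containerd/containerd.sock"))
    	// unix:///run/containerd/containerd.sock
    	fmt.Println(resolveImageEndpoint("unix:///run/image.sock", "unix:///run/containerd/containerd.sock"))
    	// unix:///run/image.sock
    }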
mirrorPodClient := kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister) - klet.podManager = kubepod.NewBasicPodManager(mirrorPodClient, secretManager, configMapManager) + klet.podManager = kubepod.NewBasicPodManager(mirrorPodClient) klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet, kubeDeps.PodStartupLatencyTracker) @@ -1309,7 +1306,7 @@ func (kl *Kubelet) RlimitStats() (*statsapi.RlimitStats, error) { // 4. the pod-resources directory // 5. the checkpoint directory func (kl *Kubelet) setupDataDirs() error { - kl.rootDirectory = path.Clean(kl.rootDirectory) + kl.rootDirectory = filepath.Clean(kl.rootDirectory) pluginRegistrationDir := kl.getPluginsRegistrationDir() pluginsDir := kl.getPluginsDir() if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil { @@ -1573,15 +1570,15 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // Arguments: // // updateType - whether this is a create (first time) or an update, should -// -// only be used for metrics since this method must be reentrant +// only be used for metrics since this method must be reentrant // // pod - the pod that is being set up +// // mirrorPod - the mirror pod known to the kubelet for this pod, if any -// podStatus - the most recent pod status observed for this pod which can // -// be used to determine the set of actions that should be taken during -// this loop of syncPod +// podStatus - the most recent pod status observed for this pod which can +// be used to determine the set of actions that should be taken during +// this loop of syncPod // // The workflow is: // - If the pod is being created, record pod worker start latency diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index 2e309de82c8d..2a4272d26925 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -24,7 +24,6 @@ import ( "net/http" "net/url" "os" - "path" "path/filepath" "runtime" "sort" @@ -123,7 +122,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol } // Get a symbolic link associated to a block device under pod device path dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath() - symlinkPath := path.Join(dirPath, volName) + symlinkPath := filepath.Join(dirPath, volName) if islinkExist, checkErr := blkutil.IsSymlinkExist(symlinkPath); checkErr != nil { return nil, checkErr } else if islinkExist { @@ -303,7 +302,7 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M // getEtcHostsPath returns the full host-side path to a pod's generated /etc/hosts file func getEtcHostsPath(podDir string) string { - hostsFilePath := path.Join(podDir, "etc-hosts") + hostsFilePath := filepath.Join(podDir, "etc-hosts") // Volume Mounts fail on Windows if it is not of the form C:/ return volumeutil.MakeAbsolutePath(runtime.GOOS, hostsFilePath) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 8ab8b557eb1f..2ebdb5d18016 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -116,7 +116,6 @@ func 
ephemeralContainerStartSpec(ec *v1.EphemeralContainer) *startSpec { // targeting. The target is stored as EphemeralContainer.TargetContainerName, which must be // resolved to a ContainerID using podStatus. The target container must already exist, which // usually isn't a problem since ephemeral containers aren't allowed at pod creation time. -// This always returns nil when the EphemeralContainers feature is disabled. func (s *startSpec) getTargetID(podStatus *kubecontainer.PodStatus) (*kubecontainer.ContainerID, error) { if s.ephemeralContainer == nil || s.ephemeralContainer.TargetContainerName == "" { return nil, nil @@ -1007,7 +1006,7 @@ func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, contain func isProbeTerminationGracePeriodSecondsSet(pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool { if probe != nil && probe.TerminationGracePeriodSeconds != nil { if *probe.TerminationGracePeriodSeconds > *pod.Spec.TerminationGracePeriodSeconds { - klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "pod-uid", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probe-type", probeType, "probe-grace-period", *probe.TerminationGracePeriodSeconds, "pod-grace-period", *pod.Spec.TerminationGracePeriodSeconds) + klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds) } return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go index 66430cf163e9..4ae6ffa09d72 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go @@ -20,12 +20,12 @@ limitations under the License. package kuberuntime import ( - "runtime" - v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/klog/v2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/winstats" "k8s.io/kubernetes/pkg/securitycontext" ) @@ -50,11 +50,6 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1 cpuLimit := container.Resources.Limits.Cpu() if !cpuLimit.IsZero() { - // Note that sysinfo.NumCPU() is limited to 64 CPUs on Windows due to Processor Groups, - // as only 64 processors are available for execution by a given process. This causes - // some oddities on systems with more than 64 processors. - // Refer https://msdn.microsoft.com/en-us/library/windows/desktop/dd405503(v=vs.85).aspx. - // Since Kubernetes doesn't have any notion of weight in the Pod/Container API, only limits/reserves, then applying CpuMaximum only // will better follow the intent of the user. At one point CpuWeights were set, but this prevented limits from having any effect. 
@@ -78,17 +73,7 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1 // https://github.com/kubernetes/kubernetes/blob/56d1c3b96d0a544130a82caad33dd57629b8a7f8/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto#L681-L682 // https://github.com/opencontainers/runtime-spec/blob/ad53dcdc39f1f7f7472b10aa0a45648fe4865496/config-windows.md#cpu // If both CpuWeight and CpuMaximum are set - ContainerD catches this invalid case and returns an error instead. - - cpuMaximum := 10000 * cpuLimit.MilliValue() / int64(runtime.NumCPU()) / 1000 - - // ensure cpuMaximum is in range [1, 10000]. - if cpuMaximum < 1 { - cpuMaximum = 1 - } else if cpuMaximum > 10000 { - cpuMaximum = 10000 - } - - wc.Resources.CpuMaximum = cpuMaximum + wc.Resources.CpuMaximum = calculateCPUMaximum(cpuLimit, int64(winstats.ProcessorCount())) } // The processor resource controls are mutually exclusive on @@ -128,3 +113,16 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1 return wc, nil } + +// calculateCPUMaximum calculates the maximum CPU given a limit and a number of cpus while ensuring it's in range [1,10000]. +func calculateCPUMaximum(cpuLimit *resource.Quantity, cpuCount int64) int64 { + cpuMaximum := 10 * cpuLimit.MilliValue() / cpuCount + + // ensure cpuMaximum is in range [1, 10000]. + if cpuMaximum < 1 { + cpuMaximum = 1 + } else if cpuMaximum > 10000 { + cpuMaximum = 10000 + } + return cpuMaximum +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go index 1187123ad7fa..296a5f5588ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go @@ -18,7 +18,7 @@ package kuberuntime import ( "fmt" - "path" + "path/filepath" "strings" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -71,5 +71,5 @@ func logSymlink(containerLogsDir, podFullName, containerName, containerID string if len(logPath) > ext4MaxFileNameLen-len(suffix) { logPath = logPath[:ext4MaxFileNameLen-len(suffix)] } - return path.Join(containerLogsDir, logPath+suffix) + return filepath.Join(containerLogsDir, logPath+suffix) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go index 2e06c266f737..96808cf71007 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go @@ -185,8 +185,7 @@ func rejectPodAdmissionBasedOnOSSelector(pod *v1.Pod, node *v1.Node) bool { // rejectPodAdmissionBasedOnOSField rejects pods if their OS field doesn't match runtime.GOOS. // TODO: Relax this restriction when we start supporting LCOW in kubernetes where podOS may not match -// -// node's OS. +// node's OS. 
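The calculateCPUMaximum helper introduced above maps a Kubernetes CPU limit onto the Windows CpuMaximum scale, where 10000 means all processors on the host. A worked example of the same arithmetic (the helper is unexported, so this sketch re-implements the formula from the hunk):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // cpuMaximum repeats the clamp from calculateCPUMaximum: millicores of limit
    // become a fraction of the host's processors in units of 1/10000.
    func cpuMaximum(cpuLimit *resource.Quantity, cpuCount int64) int64 {
    	m := 10 * cpuLimit.MilliValue() / cpuCount
    	if m < 1 {
    		return 1
    	}
    	if m > 10000 {
    		return 10000
    	}
    	return m
    }

    func main() {
    	limit := resource.MustParse("500m")
    	fmt.Println(cpuMaximum(&limit, 4)) // 1250, i.e. 12.5% of a 4-CPU host

    	big := resource.MustParse("16")
    	fmt.Println(cpuMaximum(&big, 4)) // clamped to 10000
    }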
func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool { if pod.Spec.OS == nil { return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/cri_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/cri_metrics.go index bd8dccc88939..8736eb9e3197 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/cri_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/cri_metrics.go @@ -102,7 +102,7 @@ func (c *criMetricsCollector) criMetricToProm(m *runtimeapi.Metric) (metrics.Met desc, ok := c.descriptors[m.Name] if !ok { err := fmt.Errorf("error converting CRI Metric to prometheus format") - klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "descriptor name", m.Name) + klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "name", m.Name) return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index fe8d3c00c0eb..e461f1e7705e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -496,7 +496,7 @@ var ( &metrics.GaugeOpts{ Subsystem: KubeletSubsystem, Name: ManagedEphemeralContainersKey, - Help: "Current number of ephemeral containers in pods managed by this kubelet. Ephemeral containers will be ignored if disabled by the EphemeralContainers feature gate, and this number will be 0.", + Help: "Current number of ephemeral containers in pods managed by this kubelet.", StabilityLevel: metrics.ALPHA, }, ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go index 4b491c567e28..81f1927cc864 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go @@ -319,7 +319,7 @@ func getPodDNSType(pod *v1.Pod) (podDNSType, error) { } // This should not happen as kube-apiserver should have rejected // invalid dnsPolicy. - return podDNSCluster, fmt.Errorf(fmt.Sprintf("invalid DNSPolicy=%v", dnsPolicy)) + return podDNSCluster, fmt.Errorf("invalid DNSPolicy=%v", dnsPolicy) } // mergeDNSOptions merges DNS options. If duplicated, entries given by PodDNSConfigOption will diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go index 6b0a9a430f00..aef225d31cb3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go @@ -36,14 +36,12 @@ package cache // // The pluginwatcher module follows strictly and sequentially this state machine for each *plugin name*. // e.g: If you are Registering a plugin foo, you cannot get a DeRegister call for plugin foo -// -// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...) -// call until the Register("foo") call returns. +// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...) +// call until the Register("foo") call returns. 
// // ReRegistration: Socket created with same plugin name, usually for a plugin update // e.g: plugin with name foo registers at foo.com/foo-1.9.7 later a plugin with name foo -// -// registers at foo.com/foo-1.9.9 +// registers at foo.com/foo-1.9.9 // // DeRegistration: When ReRegistration happens only the deletion of the new socket will trigger a DeRegister call type PluginHandler interface { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go index bbe39bddeaa5..867e4a845949 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go @@ -151,22 +151,25 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0x94, 0xd2, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, - 0x07, 0xcb, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa7, 0xe4, 0xcf, 0xc5, - 0xe7, 0x5a, 0x91, 0x98, 0x5b, 0x90, 0x93, 0x1a, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x24, - 0xc1, 0xc5, 0x5e, 0x04, 0x61, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x42, 0xca, - 0x5c, 0xbc, 0x50, 0x5b, 0xe2, 0xd3, 0x32, 0x53, 0x73, 0x52, 0x24, 0x98, 0xc0, 0xf2, 0x3c, 0x50, - 0x41, 0x37, 0x90, 0x98, 0x92, 0x3a, 0x17, 0x3f, 0xdc, 0xc0, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, - 0x21, 0x11, 0x2e, 0xd6, 0xd4, 0xa2, 0xa2, 0xfc, 0x22, 0xa8, 0x79, 0x10, 0x8e, 0x51, 0x00, 0x17, - 0x3b, 0x54, 0xa1, 0x90, 0x2b, 0x17, 0x9f, 0x7b, 0x6a, 0x09, 0x94, 0xe7, 0x99, 0x97, 0x96, 0x2f, - 0x24, 0xae, 0x07, 0x35, 0x54, 0x0f, 0xd5, 0x75, 0x52, 0x12, 0x98, 0x12, 0x10, 0x5b, 0x94, 0x18, - 0x9c, 0x64, 0x4e, 0x3c, 0x94, 0x63, 0xbc, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, - 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, - 0x12, 0x1b, 0xd8, 0xc3, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0x62, 0xd1, 0x9c, 0x35, - 0x01, 0x00, 0x00, + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xbf, 0x4a, 0x34, 0x31, + 0x14, 0xc5, 0x37, 0x1f, 0x7c, 0x2e, 0x1b, 0x74, 0x85, 0x20, 0x38, 0x6c, 0x11, 0x64, 0x2c, 0xb4, + 0x71, 0xc2, 0x6a, 0x63, 0x2d, 0xac, 0x62, 0xa5, 0x2c, 0xd8, 0xd8, 0x0c, 0x99, 0xf5, 0x4e, 0x36, + 0xcc, 0x9f, 0xc4, 0x24, 0xa3, 0x96, 0x3e, 0x82, 0x8f, 0xb5, 0xa5, 0xa5, 0xa5, 0x3b, 0xbe, 0x88, + 0x98, 0x44, 0x41, 0xec, 0xee, 0xef, 0x9c, 0xe4, 0x9c, 0xcb, 0xc5, 0x23, 0xae, 0x65, 0xa6, 0x8d, + 0x72, 0x8a, 0x0c, 0x1f, 0xa6, 0x05, 0x38, 0x3e, 0x9d, 0x1c, 0x09, 0xe9, 0x96, 0x5d, 0x91, 0x2d, + 0x54, 0xc3, 0x84, 0x12, 0x8a, 0x79, 0xbf, 0xe8, 0x4a, 0x4f, 0x1e, 0xfc, 0x14, 0xfe, 0xa5, 0x57, + 0x78, 0x3c, 0x7b, 0xe2, 0x8d, 0xae, 0x61, 0x0e, 0xf7, 0x1d, 0x58, 0x47, 0x12, 0x3c, 0x34, 0x61, + 0x4c, 0xd0, 0x1e, 0x3a, 0x1c, 0xcd, 0xbf, 0x91, 0xec, 0xe3, 0xad, 0xd8, 0x92, 0x97, 0x12, 0xea, + 0xbb, 0xe4, 0x9f, 0xf7, 0x37, 0xa3, 0x78, 0xfe, 0xa5, 0xa5, 0x07, 
0x78, 0xfb, 0x27, 0xd0, 0x6a, + 0xd5, 0x5a, 0x20, 0x3b, 0xf8, 0x3f, 0x18, 0xa3, 0x4c, 0xcc, 0x0b, 0x70, 0x7c, 0x8d, 0x87, 0xf1, + 0x21, 0x99, 0xe1, 0xf1, 0x05, 0xb8, 0x48, 0x97, 0x6d, 0xa9, 0xc8, 0x6e, 0x16, 0x43, 0xb3, 0xdf, + 0xdb, 0x4d, 0x92, 0xbf, 0x46, 0x68, 0x49, 0x07, 0x67, 0x76, 0xb5, 0xa6, 0xe8, 0x6d, 0x4d, 0x07, + 0xcf, 0x3d, 0x45, 0xab, 0x9e, 0xa2, 0xd7, 0x9e, 0xa2, 0xf7, 0x9e, 0xa2, 0x97, 0x0f, 0x3a, 0xb8, + 0xbd, 0xa9, 0x4e, 0x6d, 0x26, 0x15, 0xab, 0xba, 0x02, 0x4c, 0x0b, 0x0e, 0x2c, 0xd3, 0x95, 0xf0, + 0x58, 0x83, 0x63, 0xba, 0xee, 0x84, 0x6c, 0x1b, 0xde, 0x72, 0x01, 0x26, 0xd2, 0x23, 0x77, 0x8b, + 0x25, 0x18, 0x06, 0xa1, 0x2a, 0x0f, 0x6a, 0xce, 0xb5, 0xb4, 0x2c, 0xae, 0x51, 0x6c, 0xf8, 0x3b, + 0x9e, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0xab, 0xfe, 0x7d, 0x8c, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto index f0e09ae3dfe6..845950e48e52 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto @@ -17,6 +17,7 @@ limitations under the License. syntax = "proto3"; package v1beta1; +option go_package = "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go index 85bfc66c3bc8..92fdf25080e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go @@ -152,22 +152,25 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0x92, 0xd2, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, - 0x07, 0xcb, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa7, 0xe4, 0xcf, 0xc5, - 0xe7, 0x5a, 0x91, 0x98, 0x5b, 0x90, 0x93, 0x1a, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x24, - 0xc1, 0xc5, 0x5e, 0x04, 0x61, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x42, 0xca, - 0x5c, 0xbc, 0x50, 0x5b, 0xe2, 0xd3, 0x32, 0x53, 0x73, 0x52, 0x24, 0x98, 0xc0, 0xf2, 0x3c, 0x50, - 0x41, 0x37, 0x90, 0x98, 0x92, 0x3a, 0x17, 0x3f, 0xdc, 0xc0, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, - 0x21, 0x11, 0x2e, 0xd6, 0xd4, 0xa2, 0xa2, 0xfc, 0x22, 0xa8, 0x79, 0x10, 0x8e, 0x51, 0x00, 0x17, - 0x3b, 0x54, 0xa1, 0x90, 0x2b, 0x17, 0x9f, 0x7b, 0x6a, 0x09, 0x94, 0xe7, 0x99, 0x97, 0x96, 0x2f, - 0x24, 0xae, 0x07, 0x35, 0x54, 0x0f, 0xd5, 0x75, 0x52, 0x12, 0x98, 0x12, 0x10, 0x5b, 0x94, 0x18, - 0x9c, 0x64, 
0x4e, 0x3c, 0x94, 0x63, 0xbc, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, - 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, - 0x12, 0x1b, 0xd8, 0xc3, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x79, 0x17, 0x13, 0x35, - 0x01, 0x00, 0x00, + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xbf, 0x4a, 0x34, 0x31, + 0x14, 0xc5, 0x37, 0x1f, 0x7c, 0x2e, 0x1b, 0x74, 0x85, 0x20, 0x38, 0x6c, 0x11, 0x64, 0x2c, 0xb4, + 0x71, 0x82, 0x6b, 0x63, 0x2d, 0xac, 0x62, 0xa5, 0x2c, 0xd8, 0xd8, 0x0c, 0x99, 0xf5, 0x4e, 0x36, + 0xcc, 0x9f, 0xc4, 0x24, 0xa3, 0x96, 0x3e, 0x82, 0x8f, 0xb5, 0xa5, 0xa5, 0xa5, 0x3b, 0xbe, 0x88, + 0x98, 0x44, 0x41, 0xec, 0xee, 0xef, 0x9c, 0xe4, 0x9c, 0xcb, 0xc5, 0x23, 0xae, 0x65, 0xa6, 0x8d, + 0x72, 0x8a, 0x0c, 0x1f, 0x8e, 0x0b, 0x70, 0x7c, 0x3a, 0x39, 0x12, 0xd2, 0x2d, 0xbb, 0x22, 0x5b, + 0xa8, 0x86, 0x09, 0x25, 0x14, 0xf3, 0x7e, 0xd1, 0x95, 0x9e, 0x3c, 0xf8, 0x29, 0xfc, 0x4b, 0xaf, + 0xf0, 0x78, 0xf6, 0xc4, 0x1b, 0x5d, 0xc3, 0x1c, 0xee, 0x3b, 0xb0, 0x8e, 0x24, 0x78, 0x68, 0xc2, + 0x98, 0xa0, 0x3d, 0x74, 0x38, 0x9a, 0x7f, 0x23, 0xd9, 0xc7, 0x5b, 0xb1, 0x25, 0x2f, 0x25, 0xd4, + 0x77, 0xc9, 0x3f, 0xef, 0x6f, 0x46, 0xf1, 0xfc, 0x4b, 0x4b, 0x0f, 0xf0, 0xf6, 0x4f, 0xa0, 0xd5, + 0xaa, 0xb5, 0x40, 0x76, 0xf0, 0x7f, 0x30, 0x46, 0x99, 0x98, 0x17, 0x60, 0x7a, 0x8d, 0x87, 0xf1, + 0x21, 0x99, 0xe1, 0xf1, 0x05, 0xb8, 0x48, 0x97, 0x6d, 0xa9, 0xc8, 0x6e, 0x16, 0x43, 0xb3, 0xdf, + 0xdb, 0x4d, 0x92, 0xbf, 0x46, 0x68, 0x49, 0x07, 0x67, 0x76, 0xb5, 0xa6, 0xe8, 0x6d, 0x4d, 0x07, + 0xcf, 0x3d, 0x45, 0xab, 0x9e, 0xa2, 0xd7, 0x9e, 0xa2, 0xf7, 0x9e, 0xa2, 0x97, 0x0f, 0x3a, 0xb8, + 0xbd, 0xa9, 0x4e, 0x6d, 0x26, 0x15, 0xab, 0xba, 0x02, 0x4c, 0x0b, 0x0e, 0x2c, 0xd3, 0x95, 0xf0, + 0x58, 0x83, 0x63, 0xba, 0xee, 0x84, 0x6c, 0x1b, 0xde, 0x72, 0x01, 0x26, 0xd2, 0x23, 0x77, 0x8b, + 0x25, 0x18, 0x06, 0xa1, 0x2a, 0x0f, 0x6a, 0xce, 0xb5, 0xb4, 0x2c, 0xae, 0x51, 0x6c, 0xf8, 0x3b, + 0x9e, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, 0x28, 0x67, 0xcf, 0xa3, 0x8c, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto index 10ed44f17454..3f8cfeaffe87 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto @@ -17,6 +17,7 @@ limitations under the License. 
syntax = "proto3"; package v1beta2; +option go_package = "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go index 35c51683472d..44a08012c0dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher.go @@ -19,7 +19,6 @@ package pluginwatcher import ( "fmt" "os" - "runtime" "strings" "github.com/fsnotify/fsnotify" @@ -159,13 +158,7 @@ func (w *Watcher) traversePluginDir(dir string) error { func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { klog.V(6).InfoS("Handling create event", "event", event) - fi, err := os.Stat(event.Name) - // TODO: This is a workaround for Windows 20H2 issue for os.Stat(). Please see - // microsoft/Windows-Containers#97 for details. - // Once the issue is resvolved, the following os.Lstat() is not needed. - if err != nil && runtime.GOOS == "windows" { - fi, err = os.Lstat(event.Name) - } + fi, err := getStat(event) if err != nil { return fmt.Errorf("stat file %s failed: %v", event.Name, err) } @@ -192,9 +185,7 @@ func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { } func (w *Watcher) handlePluginRegistration(socketPath string) error { - if runtime.GOOS == "windows" { - socketPath = util.NormalizePath(socketPath) - } + socketPath = getSocketPath(socketPath) // Update desired state of world list of plugins // If the socket path does exist in the desired world cache, there's still // a possibility that it has been deleted and recreated again before it is diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_others.go new file mode 100644 index 000000000000..2ecb570238ed --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_others.go @@ -0,0 +1,34 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pluginwatcher + +import ( + "os" + + "github.com/fsnotify/fsnotify" +) + +func getStat(event fsnotify.Event) (os.FileInfo, error) { + return os.Stat(event.Name) +} + +func getSocketPath(socketPath string) string { + return socketPath +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_windows.go new file mode 100644 index 000000000000..476ae4fc0c71 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/plugin_watcher_windows.go @@ -0,0 +1,40 @@ +//go:build windows +// +build windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pluginwatcher + +import ( + "github.com/fsnotify/fsnotify" + "k8s.io/kubernetes/pkg/kubelet/util" + "os" +) + +func getStat(event fsnotify.Event) (os.FileInfo, error) { + fi, err := os.Stat(event.Name) + // TODO: This is a workaround for Windows 20H2 issue for os.Stat(). Please see + // microsoft/Windows-Containers#97 for details. + // Once the issue is resvolved, the following os.Lstat() is not needed. + if err != nil { + fi, err = os.Lstat(event.Name) + } + + return fi, err +} + +var getSocketPath = util.NormalizePath diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go index 0c14c1c4cb71..68b5fd764f84 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go @@ -47,18 +47,15 @@ type Reconciler interface { // NewReconciler returns a new instance of Reconciler. 
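The two new plugin_watcher files above replace the runtime.GOOS checks with the usual build-tag split: one file per platform, both providing the same helpers to the rest of the package. A minimal version of that pattern, shown as two files in one listing (package and function names here are illustrative, not the kubelet ones):

    // file: stat_others.go
    //go:build !windows

    package watcher

    import "os"

    // stat uses plain os.Stat on every platform except Windows.
    func stat(name string) (os.FileInfo, error) {
    	return os.Stat(name)
    }

    // file: stat_windows.go
    //go:build windows

    package watcher

    import "os"

    // stat falls back to os.Lstat, matching the Windows 20H2 workaround
    // referenced above (microsoft/Windows-Containers#97).
    func stat(name string) (os.FileInfo, error) {
    	fi, err := os.Stat(name)
    	if err != nil {
    		fi, err = os.Lstat(name)
    	}
    	return fi, err
    }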
// -// loopSleepDuration - the amount of time the reconciler loop sleeps between -// -// successive executions -// syncDuration - the amount of time the syncStates sleeps between -// successive executions -// // operationExecutor - used to trigger register/unregister operations safely +// (prevents more than one operation from being triggered on the same +// socket path) // -// (prevents more than one operation from being triggered on the same -// socket path) +// loopSleepDuration - the amount of time the reconciler loop sleeps between +// successive executions // // desiredStateOfWorld - cache containing the desired state of the world +// // actualStateOfWorld - cache containing the actual state of the world func NewReconciler( operationExecutor operationexecutor.OperationExecutor, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go index 2eeb4788af88..69457e6c983b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go @@ -22,10 +22,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/kubelet/configmap" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/kubernetes/pkg/kubelet/secret" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -115,19 +113,13 @@ type basicManager struct { // Mirror pod UID to pod UID map. translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID - // basicManager is keeping secretManager and configMapManager up-to-date. - secretManager secret.Manager - configMapManager configmap.Manager - // A mirror pod client to create/delete mirror pods. MirrorClient } // NewBasicPodManager returns a functional Manager. -func NewBasicPodManager(client MirrorClient, secretManager secret.Manager, configMapManager configmap.Manager) Manager { +func NewBasicPodManager(client MirrorClient) Manager { pm := &basicManager{} - pm.secretManager = secretManager - pm.configMapManager = configMapManager pm.MirrorClient = client pm.SetPods(nil) return pm diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go index a896610c9879..be428561d1e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -19,7 +19,7 @@ package stats import ( "context" "fmt" - "path" + "path/filepath" "sort" "strings" @@ -318,7 +318,7 @@ func filterTerminatedContainerInfoAndAssembleByPodCgroupKey(containerInfo map[st podCgroupKey = internalCgroupName[len(internalCgroupName)-1] } else { // Take last component only. 
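The path to path/filepath substitutions in this series (here and in the kubelet, stats, and volume-manager files) matter mostly on Windows: package path always treats '/' as the separator, while filepath uses the host separator and understands drive-letter paths. A small illustration, with the output as it would appear on a Windows host:

    package main

    import (
    	"fmt"
    	"path"
    	"path/filepath"
    )

    func main() {
    	// path is meant for slash-separated names such as URLs; it never emits '\'.
    	fmt.Println(path.Join(`C:\kubelet`, "pods")) // C:\kubelet/pods (mixed separators)

    	// filepath uses the operating system's separator, so on Windows:
    	fmt.Println(filepath.Join(`C:\kubelet`, "pods"))        // C:\kubelet\pods
    	fmt.Println(filepath.Base(`C:\kubelet\pods\etc-hosts`)) // etc-hosts
    }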
- podCgroupKey = path.Base(key) + podCgroupKey = filepath.Base(key) } cinfosByPodCgroupKey[podCgroupKey] = cinfo if !isPodManagedContainer(&cinfo) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go index 60c4abe53f0e..d1e1d90b275c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go @@ -20,7 +20,7 @@ import ( "context" "errors" "fmt" - "path" + "path/filepath" "sort" "strings" "sync" @@ -899,7 +899,7 @@ func getCRICadvisorStats(infos map[string]cadvisorapiv2.ContainerInfo) (map[stri func extractIDFromCgroupPath(cgroupPath string) string { // case0 == cgroupfs: "/kubepods/burstable/pod2fc932ce-fdcc-454b-97bd-aadfdeb4c340/9be25294016e2dc0340dd605ce1f57b492039b267a6a618a7ad2a7a58a740f32" - id := path.Base(cgroupPath) + id := filepath.Base(cgroupPath) // case1 == systemd: "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fc932ce_fdcc_454b_97bd_aadfdeb4c340.slice/cri-containerd-aaefb9d8feed2d453b543f6d928cede7a4dbefa6a0ae7c9b990dd234c56e93b9.scope" // trim anything before the final '-' and suffix .scope diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go index 12ee06bd6ec3..88a1b506f714 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go @@ -102,7 +102,7 @@ func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when tim klog.InfoS("Observed pod startup duration", "pod", klog.KObj(pod), "podStartSLOduration", podStartSLOduration, - "pod.CreationTimestamp", pod.CreationTimestamp.Time, + "podCreationTimestamp", pod.CreationTimestamp.Time, "firstStartedPulling", state.firstStartedPulling, "lastFinishedPulling", state.lastFinishedPulling, "observedRunningTime", state.observedRunningTime, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index a18df61916d9..52dc35dc9c80 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -81,12 +81,10 @@ type podStateProvider interface { // // kubeClient - used to fetch PV and PVC objects from the API server // loopSleepDuration - the amount of time the populator loop sleeps between -// -// successive executions +// successive executions // // podManager - the kubelet podManager that is the source of truth for the pods -// -// that exist on this host +// that exist on this host // // desiredStateOfWorld - the cache to populate func NewDesiredStateOfWorldPopulator( @@ -205,10 +203,18 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() { } for _, pod := range dswp.podManager.GetPods() { - if dswp.podStateProvider.ShouldPodContainersBeTerminating(pod.UID) { + // Keep consistency of adding pod during reconstruction + if dswp.hasAddedPods && 
dswp.podStateProvider.ShouldPodContainersBeTerminating(pod.UID) { // Do not (re)add volumes for pods that can't also be starting containers continue } + + if !dswp.hasAddedPods && dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) { + // When kubelet restarts, we need to add pods to dsw if there is a possibility + // that the container may still be running + continue + } + dswp.processPodVolumes(pod, mountedVolumesForPod) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go index c88bb156b638..e51ddd85e5f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go @@ -60,33 +60,33 @@ type Reconciler interface { // NewReconciler returns a new instance of Reconciler. // // controllerAttachDetachEnabled - if true, indicates that the attach/detach -// -// controller is responsible for managing the attach/detach operations for -// this node, and therefore the volume manager should not +// controller is responsible for managing the attach/detach operations for +// this node, and therefore the volume manager should not // // loopSleepDuration - the amount of time the reconciler loop sleeps between -// -// successive executions +// successive executions // // waitForAttachTimeout - the amount of time the Mount function will wait for -// -// the volume to be attached +// the volume to be attached // // nodeName - the Name for this node, used by Attach and Detach methods +// // desiredStateOfWorld - cache containing the desired state of the world +// // actualStateOfWorld - cache containing the actual state of the world -// populatorHasAddedPods - checker for whether the populator has finished // -// adding pods to the desiredStateOfWorld cache at least once after sources -// are all ready (before sources are ready, pods are probably missing) +// populatorHasAddedPods - checker for whether the populator has finished +// adding pods to the desiredStateOfWorld cache at least once after sources +// are all ready (before sources are ready, pods are probably missing) // // operationExecutor - used to trigger attach/detach/mount/unmount operations -// -// safely (prevents more than one operation from being triggered on the same -// volume) +// safely (prevents more than one operation from being triggered on the same +// volume) // // mounter - mounter passed in from kubelet, passed down unmount path +// // hostutil - hostutil passed in from kubelet +// // volumePluginMgr - volume plugin manager passed from kubelet func NewReconciler( kubeClient clientset.Interface, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go index 30088cec284c..a4f5a444ddde 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go @@ -20,7 +20,6 @@ import ( "fmt" "io/fs" "os" - "path" "path/filepath" "time" @@ -133,16 +132,16 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { continue } podName := podsDirInfo[i].Name() - podDir := path.Join(podDir, 
podName) + podDir := filepath.Join(podDir, podName) // Find filesystem volume information // ex. filesystem volume: /pods/{podUid}/volume/{escapeQualifiedPluginName}/{volumeName} volumesDirs := map[v1.PersistentVolumeMode]string{ - v1.PersistentVolumeFilesystem: path.Join(podDir, config.DefaultKubeletVolumesDirName), + v1.PersistentVolumeFilesystem: filepath.Join(podDir, config.DefaultKubeletVolumesDirName), } // Find block volume information // ex. block volume: /pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName} - volumesDirs[v1.PersistentVolumeBlock] = path.Join(podDir, config.DefaultKubeletVolumeDevicesDirName) + volumesDirs[v1.PersistentVolumeBlock] = filepath.Join(podDir, config.DefaultKubeletVolumeDevicesDirName) for volumeMode, volumesDir := range volumesDirs { var volumesDirInfo []fs.DirEntry @@ -152,7 +151,7 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } for _, volumeDir := range volumesDirInfo { pluginName := volumeDir.Name() - volumePluginPath := path.Join(volumesDir, pluginName) + volumePluginPath := filepath.Join(volumesDir, pluginName) volumePluginDirs, err := utilpath.ReadDirNoStat(volumePluginPath) if err != nil { klog.ErrorS(err, "Could not read volume plugin directory", "volumePluginPath", volumePluginPath) @@ -160,7 +159,7 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } unescapePluginName := utilstrings.UnescapeQualifiedName(pluginName) for _, volumeName := range volumePluginDirs { - volumePath := path.Join(volumePluginPath, volumeName) + volumePath := filepath.Join(volumePluginPath, volumeName) klog.V(5).InfoS("Volume path from volume plugin directory", "podName", podName, "volumePath", volumePath) volumes = append(volumes, podVolume{ podName: volumetypes.UniquePodName(podName), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go index ee58b2fcaa74..460448ded181 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go @@ -165,12 +165,10 @@ type podStateProvider interface { // VolumeManager interface. // // kubeClient - kubeClient is the kube API client used by DesiredStateOfWorldPopulator -// -// to communicate with the API server to fetch PV and PVC objects +// to communicate with the API server to fetch PV and PVC objects // // volumePluginMgr - the volume plugin manager used to access volume plugins. -// -// Must be pre-initialized. +// Must be pre-initialized. 
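The hunks above consistently swap the "path" package for "path/filepath" when building on-disk paths. The distinction only shows up off Linux, but the short standalone sketch below (file and variable names are illustrative, not part of the vendored code) contrasts the two: package path always joins and splits on forward slashes, while filepath uses the host separator, which is why it is the right choice for kubelet directories.

// pathdemo.go - why on-disk paths use path/filepath rather than path.
// On Linux both calls print the same thing; on Windows filepath.Join
// would produce backslash-separated paths while path.Join would not.
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// Joining pod directory components, as getVolumesFromPodDir does.
	slashOnly := path.Join("pods", "2fc932ce-fdcc", "volumes")    // always "pods/2fc932ce-fdcc/volumes"
	osNative := filepath.Join("pods", "2fc932ce-fdcc", "volumes") // "pods\2fc932ce-fdcc\volumes" on Windows
	fmt.Println(slashOnly, osNative)

	// Taking the last component of a cgroupfs-style key, as the stats providers do.
	fmt.Println(filepath.Base("/kubepods/burstable/pod2fc932ce/9be25294016e"))
}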
func NewVolumeManager( controllerAttachDetachEnabled bool, nodeName k8stypes.NodeName, @@ -421,18 +419,21 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { if err != nil { unmountedVolumes := vm.getUnmountedVolumes(uniquePodName, expectedVolumes) - // Also get unattached volumes for error message + // Also get unattached volumes and volumes not in dsw for error message unattachedVolumes := - vm.getUnattachedVolumes(expectedVolumes) + vm.getUnattachedVolumes(uniquePodName) + volumesNotInDSW := + vm.getVolumesNotInDSW(uniquePodName, expectedVolumes) if len(unmountedVolumes) == 0 { return nil } return fmt.Errorf( - "unmounted volumes=%v, unattached volumes=%v: %s", + "unmounted volumes=%v, unattached volumes=%v, failed to process volumes=%v: %s", unmountedVolumes, unattachedVolumes, + volumesNotInDSW, err) } @@ -476,15 +477,30 @@ func (vm *volumeManager) WaitForUnmount(pod *v1.Pod) error { return nil } +func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, expectedVolumes []string) []string { + volumesNotInDSW := sets.NewString(expectedVolumes...) + + for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() { + if volumeToMount.PodName == uniquePodName { + volumesNotInDSW.Delete(volumeToMount.OuterVolumeSpecName) + } + } + + return volumesNotInDSW.List() +} + // getUnattachedVolumes returns a list of the volumes that are expected to be attached but // are not currently attached to the node -func (vm *volumeManager) getUnattachedVolumes(expectedVolumes []string) []string { +func (vm *volumeManager) getUnattachedVolumes(uniquePodName types.UniquePodName) []string { unattachedVolumes := []string{} - for _, volume := range expectedVolumes { - if !vm.actualStateOfWorld.VolumeExists(v1.UniqueVolumeName(volume)) { - unattachedVolumes = append(unattachedVolumes, volume) + for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() { + if volumeToMount.PodName == uniquePodName && + volumeToMount.PluginIsAttachable && + !vm.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) { + unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecName) } } + return unattachedVolumes } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go index 9eee9772f65c..9bd4e13796ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go @@ -157,7 +157,7 @@ func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, } return &cadvisorapi.MachineInfo{ - NumCores: processorCount(), + NumCores: ProcessorCount(), MemoryCapacity: p.nodeInfo.memoryPhysicalCapacityBytes, MachineID: hostname, SystemUUID: systemUUID, @@ -173,7 +173,7 @@ func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, // more notes for this issue: // same issue in moby: https://github.com/moby/moby/issues/38935#issuecomment-744638345 // solution in hcsshim: https://github.com/microsoft/hcsshim/blob/master/internal/processorinfo/processor_count.go -func processorCount() int { +func ProcessorCount() int { if amount := getActiveProcessorCount(allProcessorGroups); amount != 0 { return int(amount) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go index c64056cfe4d3..4adba49b5037 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go @@ -131,8 +131,8 @@ func (hk *HollowKubelet) Run() { select {} } -// HollowKubletOptions contains settable parameters for hollow kubelet. -type HollowKubletOptions struct { +// HollowKubeletOptions contains settable parameters for hollow kubelet. +type HollowKubeletOptions struct { NodeName string KubeletPort int KubeletReadOnlyPort int @@ -144,7 +144,7 @@ type HollowKubletOptions struct { // Builds a KubeletConfiguration for the HollowKubelet, ensuring that the // usual defaults are applied for fields we do not override. -func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) { +func GetHollowKubeletConfig(opt *HollowKubeletOptions) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) { testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "") podFilePath := utils.MakeTempDirOrDie("static-pods", testRootDir) klog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) @@ -158,7 +158,6 @@ func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *k f.MaxPerPodContainerCount = 2 f.NodeLabels = opt.NodeLabels f.RegisterSchedulable = true - f.RemoteImageEndpoint = "unix:///run/containerd/containerd.sock" // Config struct c, err := options.NewKubeletConfiguration() @@ -166,6 +165,7 @@ func GetHollowKubeletConfig(opt *HollowKubletOptions) (*options.KubeletFlags, *k panic(err) } + c.ImageServiceEndpoint = "unix:///run/containerd/containerd.sock" c.StaticPodURL = "" c.EnableServer = true c.Address = "0.0.0.0" /* bind address */ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go index bbd4bd04d5df..80ef275dee51 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go @@ -21,6 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" @@ -45,7 +46,6 @@ type HollowProxy struct { } type FakeProxier struct { - proxyconfig.NoopEndpointSliceHandler proxyconfig.NoopNodeHandler } @@ -53,14 +53,14 @@ func (*FakeProxier) Sync() {} func (*FakeProxier) SyncLoop() { select {} } -func (*FakeProxier) OnServiceAdd(service *v1.Service) {} -func (*FakeProxier) OnServiceUpdate(oldService, service *v1.Service) {} -func (*FakeProxier) OnServiceDelete(service *v1.Service) {} -func (*FakeProxier) OnServiceSynced() {} -func (*FakeProxier) OnEndpointsAdd(endpoints *v1.Endpoints) {} -func (*FakeProxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {} -func (*FakeProxier) OnEndpointsDelete(endpoints *v1.Endpoints) {} -func (*FakeProxier) OnEndpointsSynced() {} +func (*FakeProxier) OnServiceAdd(service *v1.Service) {} +func (*FakeProxier) OnServiceUpdate(oldService, service *v1.Service) {} +func (*FakeProxier) OnServiceDelete(service *v1.Service) {} +func (*FakeProxier) OnServiceSynced() {} +func (*FakeProxier) OnEndpointSliceAdd(slice *discoveryv1.EndpointSlice) {} +func (*FakeProxier) OnEndpointSliceUpdate(oldSlice, slice *discoveryv1.EndpointSlice) {} +func (*FakeProxier) 
OnEndpointSliceDelete(slice *discoveryv1.EndpointSlice) {} +func (*FakeProxier) OnEndpointSlicesSynced() {} func NewHollowProxyOrDie( nodeName string, @@ -85,9 +85,14 @@ func NewHollowProxyOrDie( klog.InfoS("can't determine this node's IP, assuming 127.0.0.1") nodeIP = netutils.ParseIPSloppy("127.0.0.1") } + family := v1.IPv4Protocol + if iptInterface.IsIPv6() { + family = v1.IPv6Protocol + } // Real proxier with fake iptables, sysctl, etc underneath it. //var err error proxier, err = iptables.NewProxier( + family, iptInterface, sysctl, execer, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go new file mode 100644 index 000000000000..7b02daff340e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go @@ -0,0 +1,42 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package probe + +import ( + "net" + "syscall" +) + +// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state. +// The dialer reduces the TIME-WAIT period to 1 seconds instead of the OS default of 60 seconds. +// Using 1 second instead of 0 because SO_LINGER socket option to 0 causes pending data to be +// discarded and the connection to be aborted with an RST rather than for the pending data to be +// transmitted and the connection closed cleanly with a FIN. +// Ref: https://issues.k8s.io/89898 +func ProbeDialer() *net.Dialer { + dialer := &net.Dialer{ + Control: func(network, address string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1}) + }) + }, + } + return dialer +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go new file mode 100644 index 000000000000..ab0d020478c5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go @@ -0,0 +1,42 @@ +//go:build windows +// +build windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package probe + +import ( + "net" + "syscall" +) + +// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state. +// The dialer reduces the TIME-WAIT period to 1 seconds instead of the OS default of 60 seconds. 
+// Using 1 second instead of 0 because SO_LINGER socket option to 0 causes pending data to be +// discarded and the connection to be aborted with an RST rather than for the pending data to be +// transmitted and the connection closed cleanly with a FIN. +// Ref: https://issues.k8s.io/89898 +func ProbeDialer() *net.Dialer { + dialer := &net.Dialer{ + Control: func(network, address string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + syscall.SetsockoptLinger(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1}) + }) + }, + } + return dialer +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go index 025418becb3c..41417657eee6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go @@ -36,8 +36,7 @@ const ( // New creates Prober that will skip TLS verification while probing. // followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// -// If disabled, redirects to other hosts will trigger a warning result. +// If disabled, redirects to other hosts will trigger a warning result. func New(followNonLocalRedirects bool) Prober { tlsConfig := &tls.Config{InsecureSkipVerify: true} return NewWithTLSConfig(tlsConfig, followNonLocalRedirects) @@ -45,8 +44,7 @@ func New(followNonLocalRedirects bool) Prober { // NewWithTLSConfig takes tls config as parameter. // followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// -// If disabled, redirects to other hosts will trigger a warning result. +// If disabled, redirects to other hosts will trigger a warning result. func NewWithTLSConfig(config *tls.Config, followNonLocalRedirects bool) Prober { // We do not want the probe use node's local proxy set. transport := utilnet.SetTransportDefaults( @@ -55,7 +53,11 @@ func NewWithTLSConfig(config *tls.Config, followNonLocalRedirects bool) Prober { DisableKeepAlives: true, Proxy: http.ProxyURL(nil), DisableCompression: true, // removes Accept-Encoding header + // DialContext creates unencrypted TCP connections + // and is also used by the transport for HTTPS connection + DialContext: probe.ProbeDialer().DialContext, }) + return httpProber{transport, followNonLocalRedirects} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go index 771bc047710f..7ce1450470d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go @@ -38,7 +38,7 @@ type Prober interface { type tcpProber struct{} -// Probe returns a ProbeRunner capable of running an TCP check. +// Probe checks that a TCP connection to the address can be opened. func (pr tcpProber) Probe(host string, port int, timeout time.Duration) (probe.Result, string, error) { return DoTCPProbe(net.JoinHostPort(host, strconv.Itoa(port)), timeout) } @@ -48,7 +48,9 @@ func (pr tcpProber) Probe(host string, port int, timeout time.Duration) (probe.R // If the socket fails to open, it returns Failure. // This is exported because some other packages may want to do direct TCP probes. 
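To make the TIME-WAIT tuning above concrete, here is a small self-contained sketch (assuming a non-Windows build; probeDialer and tcpProbe are illustrative names, not the vendored identifiers) of how the SO_LINGER control hook combines with a dial timeout, in the spirit of the DoTCPProbe change that follows:

//go:build !windows

// probe_sketch.go - a net.Dialer whose Control hook sets SO_LINGER so probe
// connections spend roughly one second in TIME-WAIT instead of the OS default.
package main

import (
	"fmt"
	"net"
	"syscall"
	"time"
)

func probeDialer() *net.Dialer {
	return &net.Dialer{
		Control: func(network, address string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				// Onoff=1 enables linger; Linger=1 keeps a clean FIN close
				// while bounding TIME-WAIT to about one second.
				syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER,
					&syscall.Linger{Onoff: 1, Linger: 1})
			})
		},
	}
}

// tcpProbe mirrors the DoTCPProbe change: dial with the tuned dialer and a timeout.
func tcpProbe(addr string, timeout time.Duration) error {
	d := probeDialer()
	d.Timeout = timeout
	conn, err := d.Dial("tcp", addr)
	if err != nil {
		return err // a failed dial is a probe failure, not a fatal error
	}
	return conn.Close()
}

func main() {
	if err := tcpProbe("127.0.0.1:80", time.Second); err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("probe succeeded")
}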
func DoTCPProbe(addr string, timeout time.Duration) (probe.Result, string, error) { - conn, err := net.DialTimeout("tcp", addr, timeout) + d := probe.ProbeDialer() + d.Timeout = timeout + conn, err := d.Dial("tcp", addr) if err != nil { // Convert errors to failures to handle timeouts. return probe.Failure, err.Error(), nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go index 8674e54e1da1..eec55133ee94 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go @@ -218,48 +218,6 @@ const ( LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix" ) -// IPVSSchedulerMethod is the algorithm for allocating TCP connections and -// UDP datagrams to real servers. Scheduling algorithms are implemented as kernel modules. -// Ten are shipped with the Linux Virtual Server. -type IPVSSchedulerMethod string - -const ( - // RoundRobin distributes jobs equally amongst the available real servers. - RoundRobin IPVSSchedulerMethod = "rr" - // WeightedRoundRobin assigns jobs to real servers proportionally to their real servers' weight. - // Servers with higher weights receive new jobs first and get more jobs than servers with lower weights. - // Servers with equal weights get an equal distribution of new jobs. - WeightedRoundRobin IPVSSchedulerMethod = "wrr" - // LeastConnection assigns more jobs to real servers with fewer active jobs. - LeastConnection IPVSSchedulerMethod = "lc" - // WeightedLeastConnection assigns more jobs to servers with fewer jobs and - // relative to the real servers' weight(Ci/Wi). - WeightedLeastConnection IPVSSchedulerMethod = "wlc" - // LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if - // the server is not overloaded and available; otherwise assigns jobs to servers with fewer jobs, - // and keep it for future assignment. - LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc" - // LocalityBasedLeastConnectionWithReplication with Replication assigns jobs destined for the same IP address to the - // least-connection node in the server set for the IP address. If all the node in the server set are overloaded, - // it picks up a node with fewer jobs in the cluster and adds it to the sever set for the target. - // If the server set has not been modified for the specified time, the most loaded node is removed from the server set, - // in order to avoid high degree of replication. - LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr" - // SourceHashing assigns jobs to servers through looking up a statically assigned hash table - // by their source IP addresses. - SourceHashing IPVSSchedulerMethod = "sh" - // DestinationHashing assigns jobs to servers through looking up a statically assigned hash table - // by their destination IP addresses. - DestinationHashing IPVSSchedulerMethod = "dh" - // ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay. - // The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which - // Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server. 
- ShortestExpectedDelay IPVSSchedulerMethod = "sed" - // NeverQueue assigns an incoming job to an idle server if there is, instead of waiting for a fast one; - // if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job. - NeverQueue IPVSSchedulerMethod = "nq" -) - func (m *ProxyMode) Set(s string) error { *m = ProxyMode(s) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go index 40c58d8104ed..607a5928aaaa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go @@ -143,7 +143,6 @@ func validateKubeProxyIPVSConfiguration(config kubeproxyconfig.KubeProxyIPVSConf } allErrs = append(allErrs, validateIPVSTimeout(config, fldPath)...) - allErrs = append(allErrs, validateIPVSSchedulerMethod(kubeproxyconfig.IPVSSchedulerMethod(config.Scheduler), fldPath.Child("Scheduler"))...) allErrs = append(allErrs, validateIPVSExcludeCIDRs(config.ExcludeCIDRs, fldPath.Child("ExcludeCidrs"))...) return allErrs @@ -234,36 +233,6 @@ func validateHostPort(input string, fldPath *field.Path) field.ErrorList { return allErrs } -func validateIPVSSchedulerMethod(scheduler kubeproxyconfig.IPVSSchedulerMethod, fldPath *field.Path) field.ErrorList { - supportedMethod := []kubeproxyconfig.IPVSSchedulerMethod{ - kubeproxyconfig.RoundRobin, - kubeproxyconfig.WeightedRoundRobin, - kubeproxyconfig.LeastConnection, - kubeproxyconfig.WeightedLeastConnection, - kubeproxyconfig.LocalityBasedLeastConnection, - kubeproxyconfig.LocalityBasedLeastConnectionWithReplication, - kubeproxyconfig.SourceHashing, - kubeproxyconfig.DestinationHashing, - kubeproxyconfig.ShortestExpectedDelay, - kubeproxyconfig.NeverQueue, - "", - } - allErrs := field.ErrorList{} - var found bool - for i := range supportedMethod { - if scheduler == supportedMethod[i] { - found = true - break - } - } - // Not found - if !found { - errMsg := fmt.Sprintf("must be in %v, blank means the default algorithm method (currently rr)", supportedMethod) - allErrs = append(allErrs, field.Invalid(fldPath.Child("Scheduler"), string(scheduler), errMsg)) - } - return allErrs -} - func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go index 065b499c84b0..090062c46ba9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go @@ -46,25 +46,6 @@ type ServiceHandler interface { OnServiceSynced() } -// EndpointsHandler is an abstract interface of objects which receive -// notifications about endpoints object changes. This is not a required -// sub-interface of proxy.Provider, and proxy implementations should -// not implement it unless they can't handle EndpointSlices. -type EndpointsHandler interface { - // OnEndpointsAdd is called whenever creation of new endpoints object - // is observed. - OnEndpointsAdd(endpoints *v1.Endpoints) - // OnEndpointsUpdate is called whenever modification of an existing - // endpoints object is observed. 
- OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) - // OnEndpointsDelete is called whenever deletion of an existing endpoints - // object is observed. - OnEndpointsDelete(endpoints *v1.Endpoints) - // OnEndpointsSynced is called once all the initial event handlers were - // called and the state is fully propagated to local cache. - OnEndpointsSynced() -} - // EndpointSliceHandler is an abstract interface of objects which receive // notifications about endpoint slice object changes. type EndpointSliceHandler interface { @@ -82,116 +63,6 @@ type EndpointSliceHandler interface { OnEndpointSlicesSynced() } -// NoopEndpointSliceHandler is a noop handler for proxiers that have not yet -// implemented a full EndpointSliceHandler. -type NoopEndpointSliceHandler struct{} - -// OnEndpointSliceAdd is a noop handler for EndpointSlice creates. -func (*NoopEndpointSliceHandler) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) {} - -// OnEndpointSliceUpdate is a noop handler for EndpointSlice updates. -func (*NoopEndpointSliceHandler) OnEndpointSliceUpdate(oldEndpointSlice, newEndpointSlice *discovery.EndpointSlice) { -} - -// OnEndpointSliceDelete is a noop handler for EndpointSlice deletes. -func (*NoopEndpointSliceHandler) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) {} - -// OnEndpointSlicesSynced is a noop handler for EndpointSlice syncs. -func (*NoopEndpointSliceHandler) OnEndpointSlicesSynced() {} - -var _ EndpointSliceHandler = &NoopEndpointSliceHandler{} - -// EndpointsConfig tracks a set of endpoints configurations. -type EndpointsConfig struct { - listerSynced cache.InformerSynced - eventHandlers []EndpointsHandler -} - -// NewEndpointsConfig creates a new EndpointsConfig. -func NewEndpointsConfig(endpointsInformer coreinformers.EndpointsInformer, resyncPeriod time.Duration) *EndpointsConfig { - result := &EndpointsConfig{ - listerSynced: endpointsInformer.Informer().HasSynced, - } - - endpointsInformer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - AddFunc: result.handleAddEndpoints, - UpdateFunc: result.handleUpdateEndpoints, - DeleteFunc: result.handleDeleteEndpoints, - }, - resyncPeriod, - ) - - return result -} - -// RegisterEventHandler registers a handler which is called on every endpoints change. -func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) { - c.eventHandlers = append(c.eventHandlers, handler) -} - -// Run waits for cache synced and invokes handlers after syncing. 
-func (c *EndpointsConfig) Run(stopCh <-chan struct{}) { - klog.InfoS("Starting endpoints config controller") - - if !cache.WaitForNamedCacheSync("endpoints config", stopCh, c.listerSynced) { - return - } - - for i := range c.eventHandlers { - klog.V(3).InfoS("Calling handler.OnEndpointsSynced()") - c.eventHandlers[i].OnEndpointsSynced() - } -} - -func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) { - endpoints, ok := obj.(*v1.Endpoints) - if !ok { - utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) - return - } - for i := range c.eventHandlers { - klog.V(4).InfoS("Calling handler.OnEndpointsAdd") - c.eventHandlers[i].OnEndpointsAdd(endpoints) - } -} - -func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) { - oldEndpoints, ok := oldObj.(*v1.Endpoints) - if !ok { - utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj)) - return - } - endpoints, ok := newObj.(*v1.Endpoints) - if !ok { - utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj)) - return - } - for i := range c.eventHandlers { - klog.V(4).InfoS("Calling handler.OnEndpointsUpdate") - c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints) - } -} - -func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) { - endpoints, ok := obj.(*v1.Endpoints) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) - return - } - if endpoints, ok = tombstone.Obj.(*v1.Endpoints); !ok { - utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj)) - return - } - } - for i := range c.eventHandlers { - klog.V(4).InfoS("Calling handler.OnEndpointsDelete") - c.eventHandlers[i].OnEndpointsDelete(endpoints) - } -} - // EndpointSliceConfig tracks a set of endpoints configurations. type EndpointSliceConfig struct { listerSynced cache.InformerSynced diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go index ca7071a9eb62..23fe42fa0f92 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go @@ -18,7 +18,6 @@ package proxy import ( "net" - "reflect" "strconv" "sync" "time" @@ -32,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" - utilnet "k8s.io/utils/net" ) var supportedEndpointSliceAddressTypes = sets.NewString( @@ -159,20 +157,12 @@ type processEndpointsMapChangeFunc func(oldEndpointsMap, newEndpointsMap Endpoin // EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of // Endpoints, keyed by their namespace and name. type EndpointChangeTracker struct { - // lock protects items. + // lock protects lastChangeTriggerTimes lock sync.Mutex - // hostname is the host where kube-proxy is running. - hostname string - // items maps a service to is endpointsChange. - items map[types.NamespacedName]*endpointsChange - // makeEndpointInfo allows proxier to inject customized information when processing endpoint. - makeEndpointInfo makeEndpointFunc + processEndpointsMapChange processEndpointsMapChangeFunc // endpointSliceCache holds a simplified version of endpoint slices. 
endpointSliceCache *EndpointSliceCache - // ipfamily identify the ip family on which the tracker is operating on - ipFamily v1.IPFamily - recorder events.EventRecorder // Map from the Endpoints namespaced-name to the times of the triggers that caused the endpoints // object to change. Used to calculate the network-programming-latency. lastChangeTriggerTimes map[types.NamespacedName][]time.Time @@ -186,11 +176,6 @@ type EndpointChangeTracker struct { // NewEndpointChangeTracker initializes an EndpointsChangeMap func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, ipFamily v1.IPFamily, recorder events.EventRecorder, processEndpointsMapChange processEndpointsMapChangeFunc) *EndpointChangeTracker { return &EndpointChangeTracker{ - hostname: hostname, - items: make(map[types.NamespacedName]*endpointsChange), - makeEndpointInfo: makeEndpointInfo, - ipFamily: ipFamily, - recorder: recorder, lastChangeTriggerTimes: make(map[types.NamespacedName][]time.Time), trackerStartTime: time.Now(), processEndpointsMapChange: processEndpointsMapChange, @@ -198,66 +183,6 @@ func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc } } -// Update updates given service's endpoints change map based on the endpoints pair. It returns true -// if items changed, otherwise return false. Update can be used to add/update/delete items of EndpointsChangeMap. For example, -// Add item -// - pass as the pair. -// -// Update item -// - pass as the pair. -// -// Delete item -// - pass as the pair. -func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool { - endpoints := current - if endpoints == nil { - endpoints = previous - } - // previous == nil && current == nil is unexpected, we should return false directly. - if endpoints == nil { - return false - } - metrics.EndpointChangesTotal.Inc() - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - - ect.lock.Lock() - defer ect.lock.Unlock() - - change, exists := ect.items[namespacedName] - if !exists { - change = &endpointsChange{} - change.previous = ect.endpointsToEndpointsMap(previous) - ect.items[namespacedName] = change - } - - // In case of Endpoints deletion, the LastChangeTriggerTime annotation is - // by-definition coming from the time of last update, which is not what - // we want to measure. So we simply ignore it in this cases. - if t := getLastChangeTriggerTime(endpoints.Annotations); !t.IsZero() && current != nil && t.After(ect.trackerStartTime) { - ect.lastChangeTriggerTimes[namespacedName] = append(ect.lastChangeTriggerTimes[namespacedName], t) - } - - change.current = ect.endpointsToEndpointsMap(current) - // if change.previous equal to change.current, it means no change - if reflect.DeepEqual(change.previous, change.current) { - delete(ect.items, namespacedName) - // Reset the lastChangeTriggerTimes for the Endpoints object. Given that the network programming - // SLI is defined as the duration between a time of an event and a time when the network was - // programmed to incorporate that event, if there are events that happened between two - // consecutive syncs and that canceled each other out, e.g. pod A added -> pod A deleted, - // there will be no network programming for them and thus no network programming latency metric - // should be exported. 
- delete(ect.lastChangeTriggerTimes, namespacedName) - } else { - for spn, eps := range change.current { - klog.V(2).InfoS("Service port endpoints update", "servicePort", spn, "endpoints", len(eps)) - } - } - - metrics.EndpointChangesPending.Set(float64(len(ect.items))) - return len(ect.items) > 0 -} - // EndpointSliceUpdate updates given service's endpoints change map based on the endpoints pair. // It returns true if items changed, otherwise return false. Will add/update/delete items of EndpointsChangeMap. // If removeSlice is true, slice will be removed, otherwise it will be added or updated. @@ -293,7 +218,9 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E // we want to measure. So we simply ignore it in this cases. // TODO(wojtek-t, robscott): Address the problem for EndpointSlice deletion // when other EndpointSlice for that service still exist. - if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() && !removeSlice && t.After(ect.trackerStartTime) { + if removeSlice { + delete(ect.lastChangeTriggerTimes, namespacedName) + } else if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() && t.After(ect.trackerStartTime) { ect.lastChangeTriggerTimes[namespacedName] = append(ect.lastChangeTriggerTimes[namespacedName], t) } @@ -306,38 +233,15 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E // have changed since the last time ect was used to update an EndpointsMap. (You must call // this _before_ calling em.Update(ect).) func (ect *EndpointChangeTracker) PendingChanges() sets.String { - if ect.endpointSliceCache != nil { - return ect.endpointSliceCache.pendingChanges() - } - - ect.lock.Lock() - defer ect.lock.Unlock() - - changes := sets.NewString() - for name := range ect.items { - changes.Insert(name.String()) - } - return changes + return ect.endpointSliceCache.pendingChanges() } // checkoutChanges returns a list of pending endpointsChanges and marks them as // applied. func (ect *EndpointChangeTracker) checkoutChanges() []*endpointsChange { - ect.lock.Lock() - defer ect.lock.Unlock() - metrics.EndpointChangesPending.Set(0) - if ect.endpointSliceCache != nil { - return ect.endpointSliceCache.checkoutChanges() - } - - changes := []*endpointsChange{} - for _, change := range ect.items { - changes = append(changes, change) - } - ect.items = make(map[types.NamespacedName]*endpointsChange) - return changes + return ect.endpointSliceCache.checkoutChanges() } // checkoutTriggerTimes applies the locally cached trigger times to a map of @@ -424,76 +328,6 @@ func (em EndpointsMap) Update(changes *EndpointChangeTracker) (result UpdateEndp // EndpointsMap maps a service name to a list of all its Endpoints. type EndpointsMap map[ServicePortName][]Endpoint -// endpointsToEndpointsMap translates single Endpoints object to EndpointsMap. -// This function is used for incremental updated of endpointsMap. -// -// NOTE: endpoints object should NOT be modified. -func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoints) EndpointsMap { - if endpoints == nil { - return nil - } - - endpointsMap := make(EndpointsMap) - // We need to build a map of portname -> all ip:ports for that portname. - // Explode Endpoints.Subsets[*] into this structure. 
- for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for i := range ss.Ports { - port := &ss.Ports[i] - if port.Port == 0 { - klog.ErrorS(nil, "Ignoring invalid endpoint port", "portName", port.Name) - continue - } - svcPortName := ServicePortName{ - NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, - Port: port.Name, - Protocol: port.Protocol, - } - for i := range ss.Addresses { - addr := &ss.Addresses[i] - if addr.IP == "" { - klog.ErrorS(nil, "Ignoring invalid endpoint port with empty host", "portName", port.Name) - continue - } - - // Filter out the incorrect IP version case. - // Any endpoint port that contains incorrect IP version will be ignored. - if (ect.ipFamily == v1.IPv6Protocol) != utilnet.IsIPv6String(addr.IP) { - // Emit event on the corresponding service which had a different - // IP version than the endpoint. - utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Namespace, endpoints.Name, "") - continue - } - - // it is safe to assume that any address in endpoints.subsets[*].addresses is - // ready and NOT terminating - isReady := true - isServing := true - isTerminating := false - isLocal := false - nodeName := "" - if addr.NodeName != nil { - isLocal = *addr.NodeName == ect.hostname - nodeName = *addr.NodeName - } - // Only supported with EndpointSlice API - zoneHints := sets.String{} - - // Zone information is only supported with EndpointSlice API - baseEndpointInfo := newBaseEndpointInfo(addr.IP, nodeName, "", int(port.Port), isLocal, isReady, isServing, isTerminating, zoneHints) - if ect.makeEndpointInfo != nil { - endpointsMap[svcPortName] = append(endpointsMap[svcPortName], ect.makeEndpointInfo(baseEndpointInfo, &svcPortName)) - } else { - endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo) - } - } - - klog.V(3).InfoS("Setting endpoints for service port", "portName", svcPortName, "endpoints", formatEndpointsList(endpointsMap[svcPortName])) - } - } - return endpointsMap -} - // apply the changes to EndpointsMap and updates stale endpoints and service-endpoints pair. The `staleEndpoints` argument // is passed in to store the stale udp endpoints and `staleServiceNames` argument is passed in to store the stale udp service. // The changes map is cleared after applying them. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go index f65cf1666e8f..2013508c1c4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/common.go @@ -39,6 +39,7 @@ type httpServerFactory interface { // It is designed so that http.Server satisfies this interface, type httpServer interface { Serve(listener net.Listener) error + Close() error } // Implement listener in terms of net.Listen. 
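The Close() method added to the httpServer interface above lets the health-check code shut down the whole *http.Server instead of tracking raw listeners, which is what the service_health.go hunk below switches to. A minimal standalone sketch of that pattern (file and variable names here are illustrative only):

// serverclose_sketch.go - close the *server* rather than the listener, and
// treat http.ErrServerClosed as the normal way Serve() exits.
package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})}

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Serve returns ErrServerClosed once Close() is called; anything else is a real error.
		if err := srv.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
			fmt.Println("health check server failed:", err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // stand-in for the server's lifetime

	// Closing the server also closes its listeners and connections, which is
	// why the per-instance listener bookkeeping can be dropped.
	if err := srv.Close(); err != nil {
		fmt.Println("error closing server:", err)
	}
	<-done
}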
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go index 8f97a2190579..9a71ae7a09de 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go @@ -150,7 +150,6 @@ type hcInstance struct { nsn types.NamespacedName port uint16 - listeners []net.Listener httpServers []httpServer endpoints int // number of local endpoints for a service @@ -162,7 +161,6 @@ func (hcI *hcInstance) listenAndServeAll(hcs *server) error { var listener net.Listener addresses := hcs.nodeAddresses.List() - hcI.listeners = make([]net.Listener, 0, len(addresses)) hcI.httpServers = make([]httpServer, 0, len(addresses)) // for each of the node addresses start listening and serving @@ -181,16 +179,15 @@ func (hcI *hcInstance) listenAndServeAll(hcs *server) error { // start serving go func(hcI *hcInstance, listener net.Listener, httpSrv httpServer) { - // Serve() will exit when the listener is closed. + // Serve() will exit and return ErrServerClosed when the http server is closed. klog.V(3).InfoS("Starting goroutine for healthcheck", "service", hcI.nsn, "address", listener.Addr()) - if err := httpSrv.Serve(listener); err != nil { + if err := httpSrv.Serve(listener); err != nil && err != http.ErrServerClosed { klog.ErrorS(err, "Healthcheck closed", "service", hcI.nsn) return } klog.V(3).InfoS("Healthcheck closed", "service", hcI.nsn, "address", listener.Addr()) }(hcI, listener, httpSrv) - hcI.listeners = append(hcI.listeners, listener) hcI.httpServers = append(hcI.httpServers, httpSrv) } @@ -199,9 +196,9 @@ func (hcI *hcInstance) listenAndServeAll(hcs *server) error { func (hcI *hcInstance) closeAll() error { errors := []error{} - for _, listener := range hcI.listeners { - if err := listener.Close(); err != nil { - klog.ErrorS(err, "Error closing listener for health check service", "service", hcI.nsn, "address", listener.Addr()) + for _, server := range hcI.httpServers { + if err := server.Close(); err != nil { + klog.ErrorS(err, "Error closing server for health check service", "service", hcI.nsn) errors = append(errors, err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index e141b6f7082f..fbde2f8db8bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -221,7 +221,8 @@ var _ proxy.Provider = &Proxier{} // An error will be returned if iptables fails to update or acquire the initial lock. // Once a proxier is created, it will keep iptables up to date in the background and // will not terminate if a particular iptables call fails. 
-func NewProxier(ipt utiliptables.Interface, +func NewProxier(ipFamily v1.IPFamily, + ipt utiliptables.Interface, sysctl utilsysctl.Interface, exec utilexec.Interface, syncPeriod time.Duration, @@ -259,18 +260,6 @@ func NewProxier(ipt utiliptables.Interface, serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses) - ipFamily := v1.IPv4Protocol - if ipt.IsIPv6() { - ipFamily = v1.IPv6Protocol - } - - ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) - nodePortAddresses = ipFamilyMap[ipFamily] - // Log the IPs not matching the ipFamily - if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(ipFamily)]; ok && len(ips) > 0 { - klog.InfoS("Found node IPs of the wrong family", "ipFamily", ipFamily, "IPs", strings.Join(ips, ",")) - } - proxier := &Proxier{ svcPortMap: make(proxy.ServicePortMap), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), @@ -337,14 +326,14 @@ func NewDualStackProxier( ) (proxy.Provider, error) { // Create an ipv4 instance of the single-stack proxier ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) - ipv4Proxier, err := NewProxier(ipt[0], sysctl, + ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], sysctl, exec, syncPeriod, minSyncPeriod, masqueradeAll, localhostNodePorts, masqueradeBit, localDetectors[0], hostname, nodeIP[0], recorder, healthzServer, ipFamilyMap[v1.IPv4Protocol]) if err != nil { return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) } - ipv6Proxier, err := NewProxier(ipt[1], sysctl, + ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], sysctl, exec, syncPeriod, minSyncPeriod, masqueradeAll, false, masqueradeBit, localDetectors[1], hostname, nodeIP[1], recorder, healthzServer, ipFamilyMap[v1.IPv6Protocol]) if err != nil { @@ -863,19 +852,31 @@ func (proxier *Proxier) syncProxyRules() { } }() - // Create and link the kube chains. - for _, jump := range iptablesJumpChains { - if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { - klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain) - return - } - args := append(jump.extraArgs, - "-m", "comment", "--comment", jump.comment, - "-j", string(jump.dstChain), - ) - if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { - klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain) - return + if !tryPartialSync { + // Ensure that our jump rules (eg from PREROUTING to KUBE-SERVICES) exist. + // We can't do this as part of the iptables-restore because we don't want + // to specify/replace *all* of the rules in PREROUTING, etc. + // + // We need to create these rules when kube-proxy first starts, and we need + // to recreate them if the utiliptables Monitor detects that iptables has + // been flushed. In both of those cases, the code will force a full sync. + // In all other cases, it ought to be safe to assume that the rules + // already exist, so we'll skip this step when doing a partial sync, to + // save us from having to invoke /sbin/iptables 20 times on each sync + // (which will be very slow on hosts with lots of iptables rules). 
+ for _, jump := range iptablesJumpChains { + if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { + klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain) + return + } + args := append(jump.extraArgs, + "-m", "comment", "--comment", jump.comment, + "-j", string(jump.dstChain), + ) + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { + klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain) + return + } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index 6f013cac7439..21b4e550422f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -21,11 +21,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "reflect" - "regexp" "strconv" "strings" "sync" @@ -293,7 +291,7 @@ type Proxier struct { // sets.String is used here since we end up calculating endpoint topology multiple times for the same Service // if it has multiple ports but each Service should only be counted once. serviceNoLocalEndpointsInternal sets.String - // serviceNoLocalEndpointsExternal irepresents the set of services that couldn't be applied + // serviceNoLocalEndpointsExternal represents the set of services that couldn't be applied // due to the absence of any endpoints when the external traffic policy is "Local". // It is used to publish the sync_proxy_rules_no_endpoints_total // metric with the traffic_policy label set to "external". @@ -356,7 +354,8 @@ var _ proxy.Provider = &Proxier{} // An error will be returned if it fails to update or acquire the initial lock. // Once a proxier is created, it will keep iptables and ipvs rules up to date in the background and // will not terminate if a particular iptables or ipvs call fails. 
-func NewProxier(ipt utiliptables.Interface, +func NewProxier(ipFamily v1.IPFamily, + ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, sysctl utilsysctl.Interface, @@ -451,11 +450,6 @@ func NewProxier(ipt utiliptables.Interface, masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) - ipFamily := v1.IPv4Protocol - if ipt.IsIPv6() { - ipFamily = v1.IPv6Protocol - } - klog.V(2).InfoS("Record nodeIP and family", "nodeIP", nodeIP, "family", ipFamily) if len(scheduler) == 0 { @@ -465,13 +459,6 @@ func NewProxier(ipt utiliptables.Interface, serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses) - ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) - nodePortAddresses = ipFamilyMap[ipFamily] - // Log the IPs not matching the ipFamily - if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(ipFamily)]; ok && len(ips) > 0 { - klog.InfoS("Found node IPs of the wrong family", "ipFamily", ipFamily, "IPs", ips) - } - // excludeCIDRs has been validated before, here we just parse it to IPNet list parsedExcludeCIDRs, _ := netutils.ParseCIDRs(excludeCIDRs) @@ -553,7 +540,7 @@ func NewDualStackProxier( ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) // Create an ipv4 instance of the single-stack proxier - ipv4Proxier, err := NewProxier(ipt[0], ipvs, safeIpset, sysctl, + ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], ipvs, safeIpset, sysctl, exec, syncPeriod, minSyncPeriod, filterCIDRs(false, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[0], hostname, nodeIP[0], @@ -562,7 +549,7 @@ func NewDualStackProxier( return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) } - ipv6Proxier, err := NewProxier(ipt[1], ipvs, safeIpset, sysctl, + ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], ipvs, safeIpset, sysctl, exec, syncPeriod, minSyncPeriod, filterCIDRs(true, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[1], hostname, nodeIP[1], @@ -607,7 +594,6 @@ func newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *pro // KernelHandler can handle the current installed kernel modules. type KernelHandler interface { - GetModules() ([]string, error) GetKernelVersion() (string, error) } @@ -623,77 +609,10 @@ func NewLinuxKernelHandler() *LinuxKernelHandler { } } -// GetModules returns all installed kernel modules. -func (handle *LinuxKernelHandler) GetModules() ([]string, error) { - // Check whether IPVS required kernel modules are built-in - kernelVersionStr, err := handle.GetKernelVersion() - if err != nil { - return nil, err - } - kernelVersion, err := version.ParseGeneric(kernelVersionStr) - if err != nil { - return nil, fmt.Errorf("error parsing kernel version %q: %v", kernelVersionStr, err) - } - ipvsModules := utilipvs.GetRequiredIPVSModules(kernelVersion) - - var bmods, lmods []string - - // Find out loaded kernel modules. 
If this is a full static kernel it will try to verify if the module is compiled using /boot/config-KERNELVERSION - modulesFile, err := os.Open("/proc/modules") - if err == os.ErrNotExist { - klog.ErrorS(err, "Failed to read file /proc/modules, assuming this is a kernel without loadable modules support enabled") - kernelConfigFile := fmt.Sprintf("/boot/config-%s", kernelVersionStr) - kConfig, err := ioutil.ReadFile(kernelConfigFile) - if err != nil { - return nil, fmt.Errorf("failed to read Kernel Config file %s with error %w", kernelConfigFile, err) - } - for _, module := range ipvsModules { - if match, _ := regexp.Match("CONFIG_"+strings.ToUpper(module)+"=y", kConfig); match { - bmods = append(bmods, module) - } - } - return bmods, nil - } - if err != nil { - return nil, fmt.Errorf("failed to read file /proc/modules with error %w", err) - } - defer modulesFile.Close() - - mods, err := getFirstColumn(modulesFile) - if err != nil { - return nil, fmt.Errorf("failed to find loaded kernel modules: %v", err) - } - - builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersionStr) - b, err := ioutil.ReadFile(builtinModsFilePath) - if err != nil { - klog.ErrorS(err, "Failed to read builtin modules file, you can ignore this message when kube-proxy is running inside container without mounting /lib/modules", "filePath", builtinModsFilePath) - } - - for _, module := range ipvsModules { - if match, _ := regexp.Match(module+".ko", b); match { - bmods = append(bmods, module) - } else { - // Try to load the required IPVS kernel modules if not built in - err := handle.executor.Command("modprobe", "--", module).Run() - if err != nil { - klog.InfoS("Failed to load kernel module with modprobe, "+ - "you can ignore this message when kube-proxy is running inside container without mounting /lib/modules", "moduleName", module) - } else { - lmods = append(lmods, module) - } - } - } - - mods = append(mods, bmods...) - mods = append(mods, lmods...) - return mods, nil -} - // getFirstColumn reads all the content from r into memory and return a // slice which consists of the first word from each line. func getFirstColumn(r io.Reader) ([]string, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return nil, err } @@ -712,7 +631,7 @@ func getFirstColumn(r io.Reader) ([]string, error) { // GetKernelVersion returns currently running kernel version. func (handle *LinuxKernelHandler) GetKernelVersion() (string, error) { kernelVersionFile := "/proc/sys/kernel/osrelease" - fileContent, err := ioutil.ReadFile(kernelVersionFile) + fileContent, err := os.ReadFile(kernelVersionFile) if err != nil { return "", fmt.Errorf("error reading osrelease file %q: %v", kernelVersionFile, err) } @@ -721,61 +640,92 @@ func (handle *LinuxKernelHandler) GetKernelVersion() (string, error) { } // CanUseIPVSProxier checks if we can use the ipvs Proxier. -// This is determined by checking if all the required kernel modules can be loaded. It may -// return an error if it fails to get the kernel modules information without error, in which -// case it will also return false. -func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner, scheduler string) error { - mods, err := handle.GetModules() - if err != nil { - return fmt.Errorf("error getting installed ipvs required kernel modules: %v", err) +// The ipset version and the scheduler are checked. If any virtual servers (VS) +// already exist with the configured scheduler, we just return. 
Otherwise +// we check if a dummy VS can be configured with the configured scheduler. +// Kernel modules will be loaded automatically if necessary. +func CanUseIPVSProxier(ipvs utilipvs.Interface, ipsetver IPSetVersioner, scheduler string) error { + // BUG: https://github.com/moby/ipvs/issues/27 + // If ipvs is not compiled into the kernel no error is returned and handle==nil. + // This in turn causes ipvs.GetVirtualServers and ipvs.AddVirtualServer + // to return ok (err==nil). If/when this bug is fixed parameter "ipvs" will be nil + // if ipvs is not supported by the kernel. Until then a re-read work-around is used. + if ipvs == nil { + return fmt.Errorf("Ipvs not supported by the kernel") } - loadModules := sets.NewString() - loadModules.Insert(mods...) - kernelVersionStr, err := handle.GetKernelVersion() + // Check ipset version + versionString, err := ipsetver.GetVersion() if err != nil { - return fmt.Errorf("error determining kernel version to find required kernel modules for ipvs support: %v", err) + return fmt.Errorf("error getting ipset version, error: %v", err) } - kernelVersion, err := version.ParseGeneric(kernelVersionStr) - if err != nil { - return fmt.Errorf("error parsing kernel version %q: %v", kernelVersionStr, err) + if !checkMinVersion(versionString) { + return fmt.Errorf("ipset version: %s is less than min required version: %s", versionString, MinIPSetCheckVersion) } - mods = utilipvs.GetRequiredIPVSModules(kernelVersion) - wantModules := sets.NewString() - // We check for the existence of the scheduler mod and will trigger a missingMods error if not found + if scheduler == "" { scheduler = defaultScheduler } - schedulerMod := "ip_vs_" + scheduler - mods = append(mods, schedulerMod) - wantModules.Insert(mods...) - modules := wantModules.Difference(loadModules).UnsortedList() - var missingMods []string - ConntrackiMissingCounter := 0 - for _, mod := range modules { - if strings.Contains(mod, "nf_conntrack") { - ConntrackiMissingCounter++ - } else { - missingMods = append(missingMods, mod) - } - } - if ConntrackiMissingCounter == 2 { - missingMods = append(missingMods, "nf_conntrack_ipv4(or nf_conntrack for Linux kernel 4.19 and later)") + // If any virtual server (VS) using the scheduler exist we skip the checks. + vservers, err := ipvs.GetVirtualServers() + if err != nil { + klog.ErrorS(err, "Can't read the ipvs") + return err } - - if len(missingMods) != 0 { - return fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", missingMods) + klog.V(5).InfoS("Virtual Servers", "count", len(vservers)) + if len(vservers) > 0 { + // This is most likely a kube-proxy re-start. We know that ipvs works + // and if any VS uses the configured scheduler, we are done. + for _, vs := range vservers { + if vs.Scheduler == scheduler { + klog.V(5).InfoS("VS exist, Skipping checks") + return nil + } + } + klog.V(5).InfoS("No existing VS uses the configured scheduler", "scheduler", scheduler) + } + + // Try to insert a dummy VS with the passed scheduler. + // We should use a VIP address that is not used on the node. + // An address "198.51.100.0" from the TEST-NET-2 rage in https://datatracker.ietf.org/doc/html/rfc5737 + // is used. These addresses are reserved for documentation. If the user is using + // this address for a VS anyway we *will* mess up, but that would be an invalid configuration. 
+ // If the user have configured the address to an interface on the node (but not a VS) + // then traffic will temporary be routed to ipvs during the probe and dropped. + // The later case is also and invalid configuration, but the traffic impact will be minor. + // This should not be a problem if users honors reserved addresses, but cut/paste + // from documentation is not unheard of, so the restriction to not use the TEST-NET-2 range + // must be documented. + vs := utilipvs.VirtualServer{ + Address: netutils.ParseIPSloppy("198.51.100.0"), + Protocol: "TCP", + Port: 20000, + Scheduler: scheduler, + } + if err := ipvs.AddVirtualServer(&vs); err != nil { + klog.ErrorS(err, "Could not create dummy VS", "scheduler", scheduler) + return err } - // Check ipset version - versionString, err := ipsetver.GetVersion() + // To overcome the BUG described above we check that the VS is *really* added. + vservers, err = ipvs.GetVirtualServers() if err != nil { - return fmt.Errorf("error getting ipset version, error: %v", err) + klog.ErrorS(err, "ipvs.GetVirtualServers") + return err } - if !checkMinVersion(versionString) { - return fmt.Errorf("ipset version: %s is less than min required version: %s", versionString, MinIPSetCheckVersion) + klog.V(5).InfoS("Virtual Servers after adding dummy", "count", len(vservers)) + if len(vservers) == 0 { + klog.InfoS("Dummy VS not created", "scheduler", scheduler) + return fmt.Errorf("Ipvs not supported") // This is a BUG work-around } + klog.V(5).InfoS("Dummy VS created", "vs", vs) + + if err := ipvs.DeleteVirtualServer(&vs); err != nil { + klog.ErrorS(err, "Could not delete dummy VS") + return err + } + return nil } @@ -1575,14 +1525,14 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Write(proxier.filterChains.Bytes()) proxier.iptablesData.Write(proxier.filterRules.Bytes()) - klog.V(5).InfoS("Restoring iptables", "rules", string(proxier.iptablesData.Bytes())) + klog.V(5).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { if pErr, ok := err.(utiliptables.ParseError); ok { lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3) klog.ErrorS(pErr, "Failed to execute iptables-restore", "rules", lines) } else { - klog.ErrorS(err, "Failed to execute iptables-restore", "rules", string(proxier.iptablesData.Bytes())) + klog.ErrorS(err, "Failed to execute iptables-restore", "rules", proxier.iptablesData.Bytes()) } metrics.IptablesRestoreFailuresTotal.Inc() return diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go index 007699a3f7bb..f2cbf6b1f2d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go @@ -43,13 +43,13 @@ func (n *NodePodCIDRHandler) OnNodeAdd(node *v1.Node) { podCIDRs := node.Spec.PodCIDRs // initialize podCIDRs if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 { - klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs) + klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs) n.podCIDRs = podCIDRs return } if !reflect.DeepEqual(n.podCIDRs, podCIDRs) { klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting", - "node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs) + "node", klog.KObj(node), 
"newPodCIDRs", podCIDRs, "oldPodCIDRs", n.podCIDRs) panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting") } } @@ -61,13 +61,13 @@ func (n *NodePodCIDRHandler) OnNodeUpdate(_, node *v1.Node) { podCIDRs := node.Spec.PodCIDRs // initialize podCIDRs if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 { - klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs) + klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs) n.podCIDRs = podCIDRs return } if !reflect.DeepEqual(n.podCIDRs, podCIDRs) { klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting", - "node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs) + "node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPODCIDRs", n.podCIDRs) panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting") } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go index a53a299f5480..ceae54eb2023 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go @@ -52,7 +52,7 @@ type BaseServicePortInfo struct { healthCheckNodePort int externalPolicyLocal bool internalPolicyLocal bool - internalTrafficPolicy *v1.ServiceInternalTrafficPolicyType + internalTrafficPolicy *v1.ServiceInternalTrafficPolicy hintsAnnotation string } @@ -128,7 +128,7 @@ func (bsvcPortInfo *BaseServicePortInfo) InternalPolicyLocal() bool { } // InternalTrafficPolicy is part of ServicePort interface -func (bsvcPortInfo *BaseServicePortInfo) InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicyType { +func (bsvcPortInfo *BaseServicePortInfo) InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicy { return bsvcPortInfo.internalTrafficPolicy } @@ -429,14 +429,16 @@ func (sm *ServicePortMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP // - produce a string set which stores all other ServicePortMap's ServicePortName.String(). // // For example, -// - A{} -// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - produce string set {"ns/cluster-ip:http"} -// - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}} -// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - produce string set {"ns/cluster-ip:http"} +// +// A{} +// B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// produce string set {"ns/cluster-ip:http"} +// +// A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}} +// B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// produce string set {"ns/cluster-ip:http"} func (sm *ServicePortMap) merge(other ServicePortMap) sets.String { // existingPorts is going to store all identifiers of all services in `other` ServicePortMap. 
existingPorts := sets.NewString() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go index 956447dc808f..30e035a6e630 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go @@ -88,7 +88,7 @@ type ServicePort interface { // InternalPolicyLocal returns if a service has only node local endpoints for internal traffic. InternalPolicyLocal() bool // InternalTrafficPolicy returns service InternalTrafficPolicy - InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicyType + InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicy // HintsAnnotation returns the value of the v1.AnnotationTopologyAwareHints annotation. HintsAnnotation() string // ExternallyAccessible returns true if the service port is reachable via something diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go new file mode 100644 index 000000000000..27d186964d08 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "net" + + "k8s.io/apimachinery/pkg/util/sets" + netutils "k8s.io/utils/net" +) + +// GetNodeAddresses return all matched node IP addresses based on given cidr slice. +// Some callers, e.g. IPVS proxier, need concrete IPs, not ranges, which is why this exists. +// NetworkInterfacer is injected for test purpose. +// We expect the cidrs passed in is already validated. +// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly. +// If multiple cidrs is given, it will return the minimal IP sets, e.g. given input `[1.2.0.0/16, 0.0.0.0/0]`, it will +// only return `0.0.0.0/0`. +// NOTE: GetNodeAddresses only accepts CIDRs, if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32. +func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) { + uniqueAddressList := sets.NewString() + if len(cidrs) == 0 { + uniqueAddressList.Insert(IPv4ZeroCIDR) + uniqueAddressList.Insert(IPv6ZeroCIDR) + return uniqueAddressList, nil + } + // First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs. + for _, cidr := range cidrs { + if IsZeroCIDR(cidr) { + uniqueAddressList.Insert(cidr) + } + } + + addrs, err := nw.InterfaceAddrs() + if err != nil { + return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err) + } + + // Second round of iteration to parse IPs based on cidr. + for _, cidr := range cidrs { + if IsZeroCIDR(cidr) { + continue + } + + _, ipNet, _ := netutils.ParseCIDRSloppy(cidr) + for _, addr := range addrs { + var ip net.IP + // nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux. 
+ switch v := addr.(type) { + case *net.IPAddr: + ip = v.IP + case *net.IPNet: + ip = v.IP + default: + continue + } + + if ipNet.Contains(ip) { + if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) { + uniqueAddressList.Insert(ip.String()) + } + if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) { + uniqueAddressList.Insert(ip.String()) + } + } + } + } + + if uniqueAddressList.Len() == 0 { + return nil, fmt.Errorf("no addresses found for cidrs %v", cidrs) + } + + return uniqueAddressList, nil +} + +// ContainsIPv4Loopback returns true if the input is empty or one of the CIDR contains an IPv4 loopback address. +func ContainsIPv4Loopback(cidrStrings []string) bool { + if len(cidrStrings) == 0 { + return true + } + // RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address + ipv4LoopbackStart := netutils.ParseIPSloppy("127.0.0.0") + for _, cidr := range cidrStrings { + ip, ipnet, err := netutils.ParseCIDRSloppy(cidr) + if err != nil { + continue + } + + if netutils.IsIPv6CIDR(ipnet) { + continue + } + + if ip.IsLoopback() { + return true + } + if ipnet.Contains(ipv4LoopbackStart) { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index 92ef4658082d..b67a1c1de49d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -78,37 +78,6 @@ func BuildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]string { return portsToEndpoints } -// ContainsIPv4Loopback returns true if the input is empty or one of the CIDR contains an IPv4 loopback address. -func ContainsIPv4Loopback(cidrStrings []string) bool { - if len(cidrStrings) == 0 { - return true - } - // RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address - ipv4LoopbackStart := netutils.ParseIPSloppy("127.0.0.0") - for _, cidr := range cidrStrings { - if IsZeroCIDR(cidr) { - return true - } - - ip, ipnet, err := netutils.ParseCIDRSloppy(cidr) - if err != nil { - continue - } - - if netutils.IsIPv6CIDR(ipnet) { - continue - } - - if ip.IsLoopback() { - return true - } - if ipnet.Contains(ipv4LoopbackStart) { - return true - } - } - return false -} - // IsZeroCIDR checks whether the input CIDR string is either // the IPv4 or IPv6 zero CIDR func IsZeroCIDR(cidr string) bool { @@ -228,70 +197,6 @@ func ShouldSkipService(service *v1.Service) bool { return false } -// GetNodeAddresses return all matched node IP addresses based on given cidr slice. -// Some callers, e.g. IPVS proxier, need concrete IPs, not ranges, which is why this exists. -// NetworkInterfacer is injected for test purpose. -// We expect the cidrs passed in is already validated. -// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly. -// If multiple cidrs is given, it will return the minimal IP sets, e.g. given input `[1.2.0.0/16, 0.0.0.0/0]`, it will -// only return `0.0.0.0/0`. -// NOTE: GetNodeAddresses only accepts CIDRs, if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32. 
-func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) { - uniqueAddressList := sets.NewString() - if len(cidrs) == 0 { - uniqueAddressList.Insert(IPv4ZeroCIDR) - uniqueAddressList.Insert(IPv6ZeroCIDR) - return uniqueAddressList, nil - } - // First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs. - for _, cidr := range cidrs { - if IsZeroCIDR(cidr) { - uniqueAddressList.Insert(cidr) - } - } - - addrs, err := nw.InterfaceAddrs() - if err != nil { - return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err) - } - - // Second round of iteration to parse IPs based on cidr. - for _, cidr := range cidrs { - if IsZeroCIDR(cidr) { - continue - } - - _, ipNet, _ := netutils.ParseCIDRSloppy(cidr) - for _, addr := range addrs { - var ip net.IP - // nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux. - switch v := addr.(type) { - case *net.IPAddr: - ip = v.IP - case *net.IPNet: - ip = v.IP - default: - continue - } - - if ipNet.Contains(ip) { - if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) { - uniqueAddressList.Insert(ip.String()) - } - if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) { - uniqueAddressList.Insert(ip.String()) - } - } - } - } - - if uniqueAddressList.Len() == 0 { - return nil, fmt.Errorf("no addresses found for cidrs %v", cidrs) - } - - return uniqueAddressList, nil -} - // AddressSet validates the addresses in the slice using the "isValid" function. // Addresses that pass the validation are returned as a string Set. func AddressSet(isValid func(ip net.IP) bool, addrs []net.Addr) sets.String { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go index 9d3889d60ccb..db0f835c55af 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go @@ -48,6 +48,8 @@ type hns struct{} var ( // LoadBalancerFlagsIPv6 enables IPV6. LoadBalancerFlagsIPv6 hcn.LoadBalancerFlags = 2 + // LoadBalancerPortMappingFlagsVipExternalIP enables VipExternalIP. 
+ LoadBalancerPortMappingFlagsVipExternalIP hcn.LoadBalancerPortMappingFlags = 16 ) func (hns hns) getNetworkByName(name string) (*hnsNetworkInfo, error) { @@ -242,6 +244,20 @@ func (hns hns) deleteEndpoint(hnsID string) error { return err } +// findLoadBalancerID will construct a id from the provided loadbalancer fields +func findLoadBalancerID(endpoints []endpointsInfo, vip string, protocol, internalPort, externalPort uint16) (loadBalancerIdentifier, error) { + // Compute hash from backends (endpoint IDs) + hash, err := hashEndpoints(endpoints) + if err != nil { + klog.V(2).ErrorS(err, "Error hashing endpoints", "endpoints", endpoints) + return loadBalancerIdentifier{}, err + } + if len(vip) > 0 { + return loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, vip: vip, endpointsHash: hash}, nil + } + return loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, endpointsHash: hash}, nil +} + func (hns hns) getAllLoadBalancers() (map[loadBalancerIdentifier]*loadBalancerInfo, error) { lbs, err := hcn.ListLoadBalancers() var id loadBalancerIdentifier @@ -305,6 +321,9 @@ func (hns hns) getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlag if flags.localRoutedVIP { lbPortMappingFlags |= hcn.LoadBalancerPortMappingFlagsLocalRoutedVIP } + if flags.isVipExternalIP { + lbPortMappingFlags |= LoadBalancerPortMappingFlagsVipExternalIP + } lbFlags := hcn.LoadBalancerFlagsNone if flags.isDSR { @@ -391,7 +410,7 @@ func hashEndpoints[T string | endpointsInfo](endpoints []T) (hash [20]byte, err for _, ep := range endpoints { switch x := any(ep).(type) { case endpointsInfo: - id = x.hnsID + id = strings.ToUpper(x.hnsID) case string: id = x } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index 887c9adece30..16cc4317b222 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -107,6 +107,7 @@ type loadBalancerIdentifier struct { type loadBalancerFlags struct { isILB bool isDSR bool + isVipExternalIP bool localRoutedVIP bool useMUX bool preserveDIP bool @@ -127,6 +128,8 @@ type serviceInfo struct { hns HostNetworkService preserveDIP bool localTrafficDSR bool + internalTrafficLocal bool + winProxyOptimization bool } type hnsNetworkInfo struct { @@ -143,7 +146,12 @@ type remoteSubnetInfo struct { drMacAddress string } -const NETWORK_TYPE_OVERLAY = "overlay" +const ( + NETWORK_TYPE_OVERLAY = "overlay" + // MAX_COUNT_STALE_LOADBALANCERS is the maximum number of stale loadbalancers which cleanedup in single syncproxyrules. + // If there are more stale loadbalancers to clean, it will go to next iteration of syncproxyrules. + MAX_COUNT_STALE_LOADBALANCERS = 20 +) func newHostNetworkService() (HostNetworkService, hcn.SupportedFeatures) { var h HostNetworkService @@ -156,6 +164,44 @@ func newHostNetworkService() (HostNetworkService, hcn.SupportedFeatures) { return h, supportedFeatures } +// logFormattedEndpoints will log all endpoints and its states which are taking part in endpointmap change. +// This mostly for debugging purpose and verbosity is set to 5. 
+func logFormattedEndpoints(logMsg string, logLevel klog.Level, svcPortName proxy.ServicePortName, eps []proxy.Endpoint) { + if klog.V(logLevel).Enabled() { + var epInfo string + for _, v := range eps { + epInfo = epInfo + fmt.Sprintf("\n %s={Ready:%v,Serving:%v,Terminating:%v,IsRemote:%v}", v.String(), v.IsReady(), v.IsServing(), v.IsTerminating(), !v.GetIsLocal()) + } + klog.V(logLevel).InfoS(logMsg, "svcPortName", svcPortName, "endpoints", epInfo) + } +} + +// This will cleanup stale load balancers which are pending delete +// in last iteration. This function will act like a self healing of stale +// loadbalancer entries. +func (proxier *Proxier) cleanupStaleLoadbalancers() { + i := 0 + countStaleLB := len(proxier.mapStaleLoadbalancers) + if countStaleLB == 0 { + return + } + klog.V(3).InfoS("Cleanup of stale loadbalancers triggered", "LB Count", countStaleLB) + for lbID := range proxier.mapStaleLoadbalancers { + i++ + if err := proxier.hns.deleteLoadBalancer(lbID); err == nil { + delete(proxier.mapStaleLoadbalancers, lbID) + } + if i == MAX_COUNT_STALE_LOADBALANCERS { + // The remaining stale loadbalancers will be cleaned up in next iteration + break + } + } + countStaleLB = len(proxier.mapStaleLoadbalancers) + if countStaleLB > 0 { + klog.V(3).InfoS("Stale loadbalancers still remaining", "LB Count", countStaleLB, "stale_lb_ids", proxier.mapStaleLoadbalancers) + } +} + func getNetworkName(hnsNetworkName string) (string, error) { if len(hnsNetworkName) == 0 { klog.V(3).InfoS("Flag --network-name not set, checking environment variable") @@ -312,16 +358,24 @@ func conjureMac(macPrefix string, ip net.IP) string { } func (proxier *Proxier) endpointsMapChange(oldEndpointsMap, newEndpointsMap proxy.EndpointsMap) { - for svcPortName := range oldEndpointsMap { - proxier.onEndpointsMapChange(&svcPortName) + // This will optimize remote endpoint and loadbalancer deletion based on the annotation + var svcPortMap = make(map[proxy.ServicePortName]bool) + var logLevel klog.Level = 5 + for svcPortName, eps := range oldEndpointsMap { + logFormattedEndpoints("endpointsMapChange oldEndpointsMap", logLevel, svcPortName, eps) + svcPortMap[svcPortName] = true + proxier.onEndpointsMapChange(&svcPortName, false) } - for svcPortName := range newEndpointsMap { - proxier.onEndpointsMapChange(&svcPortName) + for svcPortName, eps := range newEndpointsMap { + logFormattedEndpoints("endpointsMapChange newEndpointsMap", logLevel, svcPortName, eps) + // redundantCleanup true means cleanup is called second time on the same svcPort + redundantCleanup := svcPortMap[svcPortName] + proxier.onEndpointsMapChange(&svcPortName, redundantCleanup) } } -func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName) { +func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName, redundantCleanup bool) { svc, exists := proxier.svcPortMap[*svcPortName] @@ -333,8 +387,15 @@ func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName) return } + if svcInfo.winProxyOptimization && redundantCleanup { + // This is a second cleanup call. + // Second cleanup on the same svcPort will be ignored if the + // winProxyOptimization is Enabled + return + } + klog.V(3).InfoS("Endpoints are modified. 
Service is stale", "servicePortName", svcPortName) - svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName]) + svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, true) } else { // If no service exists, just cleanup the remote endpoints klog.V(3).InfoS("Endpoints are orphaned, cleaning up") @@ -381,7 +442,7 @@ func (proxier *Proxier) onServiceMapChange(svcPortName *proxy.ServicePortName) { } klog.V(3).InfoS("Updating existing service port", "servicePortName", svcPortName, "clusterIP", svcInfo.ClusterIP(), "port", svcInfo.Port(), "protocol", svcInfo.Protocol()) - svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName]) + svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, false) } } @@ -426,6 +487,13 @@ func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, return ep, err } +func (ep *endpointsInfo) DecrementRefCount() { + klog.V(3).InfoS("Decrementing Endpoint RefCount", "endpointsInfo", ep) + if !ep.GetIsLocal() && ep.refCount != nil && *ep.refCount > 0 { + *ep.refCount-- + } +} + func (ep *endpointsInfo) Cleanup() { klog.V(3).InfoS("Endpoint cleanup", "endpointsInfo", ep) if !ep.GetIsLocal() && ep.refCount != nil { @@ -461,7 +529,13 @@ func (refCountMap endPointsReferenceCountMap) getRefCount(hnsID string) *uint16 func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *proxy.BaseServicePortInfo) proxy.ServicePort { info := &serviceInfo{BaseServicePortInfo: bsvcPortInfo} preserveDIP := service.Annotations["preserve-destination"] == "true" - localTrafficDSR := service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal + // Annotation introduced to enable optimized loadbalancing + winProxyOptimization := !(strings.ToUpper(service.Annotations["winProxyOptimization"]) == "DISABLED") + localTrafficDSR := service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal + var internalTrafficLocal bool + if service.Spec.InternalTrafficPolicy != nil { + internalTrafficLocal = *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal + } err := hcn.DSRSupported() if err != nil { preserveDIP = false @@ -478,6 +552,9 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service info.targetPort = targetPort info.hns = proxier.hns info.localTrafficDSR = localTrafficDSR + info.internalTrafficLocal = internalTrafficLocal + info.winProxyOptimization = winProxyOptimization + klog.V(3).InfoS("Flags enabled for service", "service", service.Name, "localTrafficDSR", localTrafficDSR, "internalTrafficLocal", internalTrafficLocal, "preserveDIP", preserveDIP, "winProxyOptimization", winProxyOptimization) for _, eip := range service.Spec.ExternalIPs { info.externalIPs = append(info.externalIPs, &externalIPInfo{ip: eip}) @@ -561,6 +638,7 @@ type Proxier struct { forwardHealthCheckVip bool rootHnsEndpointName string + mapStaleLoadbalancers map[string]bool // This maintains entries of stale load balancers which are pending delete in last iteration } type localPort struct { @@ -719,6 +797,7 @@ func NewProxier( healthzPort: healthzPort, rootHnsEndpointName: config.RootHnsEndpointName, forwardHealthCheckVip: config.ForwardHealthCheckVip, + mapStaleLoadbalancers: make(map[string]bool), } ipFamily := v1.IPv4Protocol @@ -780,15 +859,25 @@ func CleanupLeftovers() (encounteredError bool) { return encounteredError } -func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint) { +func 
(svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint, mapStaleLoadbalancers map[string]bool, isEndpointChange bool) { klog.V(3).InfoS("Service cleanup", "serviceInfo", svcInfo) - // Skip the svcInfo.policyApplied check to remove all the policies - svcInfo.deleteLoadBalancerPolicy() + // if it's an endpoint change and winProxyOptimization annotation enable, skip lb deletion and remoteEndpoint deletion + winProxyOptimization := isEndpointChange && svcInfo.winProxyOptimization + if winProxyOptimization { + klog.V(3).InfoS("Skipped loadbalancer deletion.", "hnsID", svcInfo.hnsID, "nodePorthnsID", svcInfo.nodePorthnsID, "winProxyOptimization", svcInfo.winProxyOptimization, "isEndpointChange", isEndpointChange) + } else { + // Skip the svcInfo.policyApplied check to remove all the policies + svcInfo.deleteLoadBalancerPolicy(mapStaleLoadbalancers) + } // Cleanup Endpoints references for _, ep := range endpoints { epInfo, ok := ep.(*endpointsInfo) if ok { - epInfo.Cleanup() + if winProxyOptimization { + epInfo.DecrementRefCount() + } else { + epInfo.Cleanup() + } } } if svcInfo.remoteEndpoint != nil { @@ -798,10 +887,11 @@ func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint) { svcInfo.policyApplied = false } -func (svcInfo *serviceInfo) deleteLoadBalancerPolicy() { +func (svcInfo *serviceInfo) deleteLoadBalancerPolicy(mapStaleLoadbalancer map[string]bool) { // Remove the Hns Policy corresponding to this service hns := svcInfo.hns if err := hns.deleteLoadBalancer(svcInfo.hnsID); err != nil { + mapStaleLoadbalancer[svcInfo.hnsID] = true klog.V(1).ErrorS(err, "Error deleting Hns loadbalancer policy resource.", "hnsID", svcInfo.hnsID, "ClusterIP", svcInfo.ClusterIP()) } else { // On successful delete, remove hnsId @@ -809,6 +899,7 @@ func (svcInfo *serviceInfo) deleteLoadBalancerPolicy() { } if err := hns.deleteLoadBalancer(svcInfo.nodePorthnsID); err != nil { + mapStaleLoadbalancer[svcInfo.nodePorthnsID] = true klog.V(1).ErrorS(err, "Error deleting Hns NodePort policy resource.", "hnsID", svcInfo.nodePorthnsID, "NodePort", svcInfo.NodePort()) } else { // On successful delete, remove hnsId @@ -816,6 +907,7 @@ func (svcInfo *serviceInfo) deleteLoadBalancerPolicy() { } for _, externalIP := range svcInfo.externalIPs { + mapStaleLoadbalancer[externalIP.hnsID] = true if err := hns.deleteLoadBalancer(externalIP.hnsID); err != nil { klog.V(1).ErrorS(err, "Error deleting Hns ExternalIP policy resource.", "hnsID", externalIP.hnsID, "IP", externalIP.ip) } else { @@ -824,7 +916,9 @@ func (svcInfo *serviceInfo) deleteLoadBalancerPolicy() { } } for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs { + klog.V(3).InfoS("Loadbalancer Hns LoadBalancer delete triggered for loadBalancer Ingress resources in cleanup", "lbIngressIP", lbIngressIP) if err := hns.deleteLoadBalancer(lbIngressIP.hnsID); err != nil { + mapStaleLoadbalancer[lbIngressIP.hnsID] = true klog.V(1).ErrorS(err, "Error deleting Hns IngressIP policy resource.", "hnsID", lbIngressIP.hnsID, "IP", lbIngressIP.ip) } else { // On successful delete, remove hnsId @@ -833,6 +927,7 @@ func (svcInfo *serviceInfo) deleteLoadBalancerPolicy() { if lbIngressIP.healthCheckHnsID != "" { if err := hns.deleteLoadBalancer(lbIngressIP.healthCheckHnsID); err != nil { + mapStaleLoadbalancer[lbIngressIP.healthCheckHnsID] = true klog.V(1).ErrorS(err, "Error deleting Hns IngressIP HealthCheck policy resource.", "hnsID", lbIngressIP.healthCheckHnsID, "IP", lbIngressIP.ip) } else { // On successful delete, remove hnsId @@ -992,7 +1087,7 
@@ func (proxier *Proxier) cleanupAllPolicies() { klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName) continue } - svcInfo.cleanupAllPolicies(proxier.endpointsMap[svcName]) + svcInfo.cleanupAllPolicies(proxier.endpointsMap[svcName], proxier.mapStaleLoadbalancers, false) } } @@ -1009,6 +1104,56 @@ func isNetworkNotFoundError(err error) bool { return false } +// isAllEndpointsTerminating function will return true if all the endpoints are terminating. +// If atleast one is not terminating, then return false +func (proxier *Proxier) isAllEndpointsTerminating(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool { + for _, epInfo := range proxier.endpointsMap[svcName] { + ep, ok := epInfo.(*endpointsInfo) + if !ok { + continue + } + if isLocalTrafficDSR && !ep.GetIsLocal() { + // KEP-1669: Ignore remote endpoints when the ExternalTrafficPolicy is Local (DSR Mode) + continue + } + // If Readiness Probe fails and pod is not under delete, then + // the state of the endpoint will be - Ready:False, Serving:False, Terminating:False + if !ep.IsReady() && !ep.IsTerminating() { + // Ready:false, Terminating:False, ignore + continue + } + if !ep.IsTerminating() { + return false + } + } + return true +} + +// isAllEndpointsNonServing function will return true if all the endpoints are non serving. +// If atleast one is serving, then return false +func (proxier *Proxier) isAllEndpointsNonServing(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool { + for _, epInfo := range proxier.endpointsMap[svcName] { + ep, ok := epInfo.(*endpointsInfo) + if !ok { + continue + } + if isLocalTrafficDSR && !ep.GetIsLocal() { + continue + } + if ep.IsServing() { + return false + } + } + return true +} + +// updateQueriedEndpoints updates the queriedEndpoints map with newly created endpoint details +func updateQueriedEndpoints(newHnsEndpoint *endpointsInfo, queriedEndpoints map[string]*endpointsInfo) { + // store newly created endpoints in queriedEndpoints + queriedEndpoints[newHnsEndpoint.hnsID] = newHnsEndpoint + queriedEndpoints[newHnsEndpoint.ip] = newHnsEndpoint +} + // This is where all of the hns save/restore calls happen. // assumes proxier.mu is held func (proxier *Proxier) syncProxyRules() { @@ -1125,9 +1270,7 @@ func (proxier *Proxier) syncProxyRules() { newHnsEndpoint.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID) *newHnsEndpoint.refCount++ svcInfo.remoteEndpoint = newHnsEndpoint - // store newly created endpoints in queriedEndpoints - queriedEndpoints[newHnsEndpoint.hnsID] = newHnsEndpoint - queriedEndpoints[newHnsEndpoint.ip] = newHnsEndpoint + updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) } } @@ -1137,6 +1280,19 @@ func (proxier *Proxier) syncProxyRules() { // Create Remote endpoints for every endpoint, corresponding to the service containsPublicIP := false containsNodeIP := false + var allEndpointsTerminating, allEndpointsNonServing bool + someEndpointsServing := true + + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints) && len(svcInfo.loadBalancerIngressIPs) > 0 { + // Check should be done only if comes under the feature gate or enabled + // The check should be done only if Spec.Type == Loadbalancer. 
+ allEndpointsTerminating = proxier.isAllEndpointsTerminating(svcName, svcInfo.localTrafficDSR) + allEndpointsNonServing = proxier.isAllEndpointsNonServing(svcName, svcInfo.localTrafficDSR) + someEndpointsServing = !allEndpointsNonServing + klog.V(4).InfoS("Terminating status checked for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "allEndpointsTerminating", allEndpointsTerminating, "allEndpointsNonServing", allEndpointsNonServing, "localTrafficDSR", svcInfo.localTrafficDSR) + } else { + klog.V(4).InfoS("Skipped terminating status check for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "proxyEndpointsFeatureGateEnabled", utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints), "ingressLBCount", len(svcInfo.loadBalancerIngressIPs)) + } for _, epInfo := range proxier.endpointsMap[svcName] { ep, ok := epInfo.(*endpointsInfo) @@ -1145,9 +1301,25 @@ func (proxier *Proxier) syncProxyRules() { continue } - if !ep.IsReady() { + if svcInfo.internalTrafficLocal && svcInfo.localTrafficDSR && !ep.GetIsLocal() { + // No need to use or create remote endpoint when internal and external traffic policy is remote + klog.V(3).InfoS("Skipping the endpoint. Both internalTraffic and external traffic policies are local", "EpIP", ep.ip, " EpPort", ep.port) continue } + + if someEndpointsServing { + + if !allEndpointsTerminating && !ep.IsReady() { + klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is either not ready or all not all endpoints are terminating", "EpIP", ep.ip, " EpPort", ep.port, "allEndpointsTerminating", allEndpointsTerminating, "IsEpReady", ep.IsReady()) + continue + } + if !ep.IsServing() { + klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is not serving", "EpIP", ep.ip, " EpPort", ep.port, "IsEpServing", ep.IsServing()) + continue + } + + } + var newHnsEndpoint *endpointsInfo hnsNetworkName := proxier.network.name var err error @@ -1205,6 +1377,7 @@ func (proxier *Proxier) syncProxyRules() { klog.ErrorS(err, "Remote endpoint creation failed", "endpointsInfo", hnsEndpoint) continue } + updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) } else { hnsEndpoint := &endpointsInfo{ @@ -1218,6 +1391,7 @@ func (proxier *Proxier) syncProxyRules() { klog.ErrorS(err, "Remote endpoint creation failed") continue } + updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) } } // For Overlay networks 'SourceVIP' on an Load balancer Policy can either be chosen as @@ -1266,7 +1440,14 @@ func (proxier *Proxier) syncProxyRules() { klog.InfoS("Load Balancer already exists -- Debug ", "hnsID", svcInfo.hnsID) } + // In ETP:Cluster, if all endpoints are under termination, + // it will have serving and terminating, else only ready and serving if len(hnsEndpoints) == 0 { + if svcInfo.winProxyOptimization { + // Deleting loadbalancers when there are no endpoints to serve. 
+ klog.V(3).InfoS("Cleanup existing ", "endpointsInfo", hnsEndpoints, "serviceName", svcName) + svcInfo.deleteLoadBalancerPolicy(proxier.mapStaleLoadbalancers) + } klog.ErrorS(nil, "Endpoint information not available for service, not applying any policy", "serviceName", svcName) continue } @@ -1283,23 +1464,41 @@ func (proxier *Proxier) syncProxyRules() { klog.InfoS("Session Affinity is not supported on this version of Windows") } - hnsLoadBalancer, err := hns.getLoadBalancer( - hnsEndpoints, - loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.isIPv6Mode, sessionAffinity: sessionAffinityClientIP}, - sourceVip, - svcInfo.ClusterIP().String(), - Enum(svcInfo.Protocol()), - uint16(svcInfo.targetPort), - uint16(svcInfo.Port()), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } + endpointsAvailableForLB := !allEndpointsTerminating && !allEndpointsNonServing + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), hnsEndpoints, queriedLoadBalancers) + + if endpointsAvailableForLB { + + // clusterIPEndpoints is the endpoint list used for creating ClusterIP loadbalancer. + clusterIPEndpoints := hnsEndpoints + if svcInfo.internalTrafficLocal { + // Take local endpoints for clusterip loadbalancer when internal traffic policy is local. + clusterIPEndpoints = hnsLocalEndpoints + } + + // If all endpoints are terminating, then no need to create Cluster IP LoadBalancer + // Cluster IP LoadBalancer creation + hnsLoadBalancer, err := hns.getLoadBalancer( + clusterIPEndpoints, + loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.isIPv6Mode, sessionAffinity: sessionAffinityClientIP}, + sourceVip, + svcInfo.ClusterIP().String(), + Enum(svcInfo.Protocol()), + uint16(svcInfo.targetPort), + uint16(svcInfo.Port()), + queriedLoadBalancers, + ) + if err != nil { + klog.ErrorS(err, "Policy creation failed") + continue + } + + svcInfo.hnsID = hnsLoadBalancer.hnsID + klog.V(3).InfoS("Hns LoadBalancer resource created for cluster ip resources", "clusterIP", svcInfo.ClusterIP(), "hnsID", hnsLoadBalancer.hnsID) - svcInfo.hnsID = hnsLoadBalancer.hnsID - klog.V(3).InfoS("Hns LoadBalancer resource created for cluster ip resources", "clusterIP", svcInfo.ClusterIP(), "hnsID", hnsLoadBalancer.hnsID) + } else { + klog.V(3).InfoS("Skipped creating Hns LoadBalancer for cluster ip resources. 
Reason : all endpoints are terminating", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating) + } // If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints if svcInfo.NodePort() > 0 { @@ -1310,10 +1509,13 @@ func (proxier *Proxier) syncProxyRules() { nodePortEndpoints = hnsLocalEndpoints } - if len(nodePortEndpoints) > 0 { + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.nodePorthnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), nodePortEndpoints, queriedLoadBalancers) + + if len(nodePortEndpoints) > 0 && endpointsAvailableForLB { + // If all endpoints are in terminating stage, then no need to create Node Port LoadBalancer hnsLoadBalancer, err := hns.getLoadBalancer( nodePortEndpoints, - loadBalancerFlags{isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, sourceVip, "", Enum(svcInfo.Protocol()), @@ -1329,7 +1531,7 @@ func (proxier *Proxier) syncProxyRules() { svcInfo.nodePorthnsID = hnsLoadBalancer.hnsID klog.V(3).InfoS("Hns LoadBalancer resource created for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "hnsID", hnsLoadBalancer.hnsID) } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "hnsID", hnsLoadBalancer.hnsID) + klog.V(3).InfoS("Skipped creating Hns LoadBalancer for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating) } } @@ -1341,11 +1543,14 @@ func (proxier *Proxier) syncProxyRules() { externalIPEndpoints = hnsLocalEndpoints } - if len(externalIPEndpoints) > 0 { + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &externalIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), externalIPEndpoints, queriedLoadBalancers) + + if len(externalIPEndpoints) > 0 && endpointsAvailableForLB { + // If all endpoints are in terminating stage, then no need to External IP LoadBalancer // Try loading existing policies, if already available hnsLoadBalancer, err = hns.getLoadBalancer( externalIPEndpoints, - loadBalancerFlags{isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, sourceVip, externalIP.ip, Enum(svcInfo.Protocol()), @@ -1360,7 +1565,7 @@ func (proxier *Proxier) syncProxyRules() { externalIP.hnsID = hnsLoadBalancer.hnsID klog.V(3).InfoS("Hns LoadBalancer resource created for externalIP resources", "externalIP", externalIP, "hnsID", hnsLoadBalancer.hnsID) } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for externalIP resources", "externalIP", externalIP, "hnsID", hnsLoadBalancer.hnsID) + klog.V(3).InfoS("Skipped creating Hns LoadBalancer for externalIP resources", "externalIP", externalIP, "allEndpointsTerminating", allEndpointsTerminating) } } // Create a Load Balancer Policy for each loadbalancer ingress @@ -1371,10 +1576,12 @@ func (proxier *Proxier) syncProxyRules() { 
lbIngressEndpoints = hnsLocalEndpoints } + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), lbIngressEndpoints, queriedLoadBalancers) + if len(lbIngressEndpoints) > 0 { hnsLoadBalancer, err := hns.getLoadBalancer( lbIngressEndpoints, - loadBalancerFlags{isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, sourceVip, lbIngressIP.ip, Enum(svcInfo.Protocol()), @@ -1392,11 +1599,15 @@ func (proxier *Proxier) syncProxyRules() { klog.V(3).InfoS("Skipped creating Hns LoadBalancer for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP) } - if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil { + if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil && endpointsAvailableForLB { + // Avoid creating health check loadbalancer if all the endpoints are terminating nodeport := proxier.healthzPort if svcInfo.HealthCheckNodePort() != 0 { nodeport = svcInfo.HealthCheckNodePort() } + + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.healthCheckHnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), []endpointsInfo{*gatewayHnsendpoint}, queriedLoadBalancers) + hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer( []endpointsInfo{*gatewayHnsendpoint}, loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP}, @@ -1413,6 +1624,8 @@ func (proxier *Proxier) syncProxyRules() { } lbIngressIP.healthCheckHnsID = hnsHealthCheckLoadBalancer.hnsID klog.V(3).InfoS("Hns Health Check LoadBalancer resource created for loadBalancer Ingress resources", "ip", lbIngressIP) + } else { + klog.V(3).InfoS("Skipped creating Hns Health Check LoadBalancer for loadBalancer Ingress resources", "ip", lbIngressIP, "allEndpointsTerminating", allEndpointsTerminating) } } svcInfo.policyApplied = true @@ -1444,7 +1657,51 @@ func (proxier *Proxier) syncProxyRules() { // remove stale endpoint refcount entries for hnsID, referenceCount := range proxier.endPointsRefCount { if *referenceCount <= 0 { + klog.V(3).InfoS("Deleting unreferenced remote endpoint", "hnsID", hnsID) + proxier.hns.deleteEndpoint(hnsID) delete(proxier.endPointsRefCount, hnsID) } } + // This will cleanup stale load balancers which are pending delete + // in last iteration + proxier.cleanupStaleLoadbalancers() +} + +// deleteExistingLoadBalancer checks whether loadbalancer delete is needed or not. +// If it is needed, the function will delete the existing loadbalancer and return true, else false. 
+func (proxier *Proxier) deleteExistingLoadBalancer(hns HostNetworkService, winProxyOptimization bool, lbHnsID *string, sourceVip string, protocol, intPort, extPort uint16, endpoints []endpointsInfo, queriedLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) bool { + + if !winProxyOptimization || *lbHnsID == "" { + // Loadbalancer delete not needed + return false + } + + lbID, lbIdErr := findLoadBalancerID( + endpoints, + sourceVip, + protocol, + intPort, + extPort, + ) + + if lbIdErr != nil { + return proxier.deleteLoadBalancer(hns, lbHnsID) + } + + if _, ok := queriedLoadBalancers[lbID]; ok { + // The existing loadbalancer in the system is same as what we try to delete and recreate. So we skip deleting. + return false + } + + return proxier.deleteLoadBalancer(hns, lbHnsID) +} + +func (proxier *Proxier) deleteLoadBalancer(hns HostNetworkService, lbHnsID *string) bool { + klog.V(3).InfoS("Hns LoadBalancer delete triggered for loadBalancer resources", "lbHnsID", *lbHnsID) + if err := hns.deleteLoadBalancer(*lbHnsID); err != nil { + // This will be cleanup by cleanupStaleLoadbalancer fnction. + proxier.mapStaleLoadbalancers[*lbHnsID] = true + } + *lbHnsID = "" + return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go index 31bb8df0201d..6a09d143a3d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go @@ -52,6 +52,10 @@ type InterPodAffinityArgs struct { // HardPodAffinityWeight is the scoring weight for existing pods with a // matching hard affinity to the incoming pod. HardPodAffinityWeight int32 + + // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity + // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. 
+ IgnorePreferredTermsOfExistingPods bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go index d9dbc31adbc9..dd8d9b231094 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go @@ -375,6 +375,7 @@ func autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1.I if err := metav1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } @@ -387,6 +388,7 @@ func autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *conf if err := metav1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go index 37341a4233e5..70758fab0b91 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go @@ -42,6 +42,7 @@ func getDefaultPlugins() *v1beta2.Plugins { {Name: names.PodTopologySpread}, {Name: names.InterPodAffinity}, {Name: names.VolumeBinding}, + {Name: names.VolumeZone}, {Name: names.NodeAffinity}, }, }, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go index 642729e388f0..e5c9fb73fa65 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go @@ -375,6 +375,7 @@ func autoConvert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } @@ -387,6 +388,7 @@ func autoConvert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go index 1e8fd12ba644..841c537c3f7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go @@ -375,6 +375,7 @@ 
func autoConvert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } @@ -387,6 +388,7 @@ func autoConvert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs(in if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { return err } + out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go index 1dcfc699e809..e284101607f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go @@ -42,6 +42,14 @@ func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) u var errs []error errs = append(errs, componentbasevalidation.ValidateClientConnectionConfiguration(&cc.ClientConnection, field.NewPath("clientConnection")).ToAggregate()) errs = append(errs, componentbasevalidation.ValidateLeaderElectionConfiguration(&cc.LeaderElection, field.NewPath("leaderElection")).ToAggregate()) + + // TODO: This can be removed when ResourceLock is not available + // Only ResourceLock values with leases are allowed + if cc.LeaderElection.LeaderElect && cc.LeaderElection.ResourceLock != "leases" { + leaderElectionPath := field.NewPath("leaderElection") + errs = append(errs, field.Invalid(leaderElectionPath.Child("resourceLock"), cc.LeaderElection.ResourceLock, `resourceLock value must be "leases"`)) + } + profilesPath := field.NewPath("profiles") if cc.Parallelism <= 0 { errs = append(errs, field.Invalid(field.NewPath("parallelism"), cc.Parallelism, "should be an integer value greater than zero")) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go index 7f4a1351258d..e227c7da8658 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go @@ -19,6 +19,8 @@ package framework import ( "errors" "sync" + + "k8s.io/apimachinery/pkg/util/sets" ) var ( @@ -48,6 +50,8 @@ type CycleState struct { storage sync.Map // if recordPluginMetrics is true, PluginExecutionDuration will be recorded for this cycle. recordPluginMetrics bool + // SkipFilterPlugins are plugins that will be skipped in the Filter extension point. + SkipFilterPlugins sets.Set[string] } // NewCycleState initializes a new CycleState and returns its pointer. 
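// The hunk above adds SkipFilterPlugins to CycleState: a set of plugin names whose
// coupled Filter plugin (and PreFilterExtensions) can be bypassed for the rest of the
// scheduling cycle once their PreFilter returned Skip. A minimal sketch of that lookup,
// assuming a hypothetical helper; sketchRunFilters, filterNames and run are illustrative
// names, not the framework's real internals.
func sketchRunFilters(state *CycleState, filterNames []string, run func(name string) *Status) *Status {
	for _, name := range filterNames {
		if state.SkipFilterPlugins.Has(name) {
			continue // PreFilter returned Skip for this plugin, so its Filter is not run
		}
		if st := run(name); !st.IsSuccess() {
			return st // stop at the first non-success status
		}
	}
	return nil
}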
@@ -83,6 +87,7 @@ func (c *CycleState) Clone() *CycleState { return true }) copy.recordPluginMetrics = c.recordPluginMetrics + copy.SkipFilterPlugins = c.SkipFilterPlugins return copy } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go index 0049e7acb1bb..07640e5be588 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go @@ -79,33 +79,27 @@ const ( // Error is used for internal plugin errors, unexpected input, etc. Error // Unschedulable is used when a plugin finds a pod unschedulable. The scheduler might attempt to - // preempt other pods to get this pod scheduled. Use UnschedulableAndUnresolvable to make the - // scheduler skip preemption. + // run other postFilter plugins like preemption to get this pod scheduled. + // Use UnschedulableAndUnresolvable to make the scheduler skipping other postFilter plugins. // The accompanying status message should explain why the pod is unschedulable. Unschedulable // UnschedulableAndUnresolvable is used when a plugin finds a pod unschedulable and - // preemption would not change anything. Plugins should return Unschedulable if it is possible - // that the pod can get scheduled with preemption. + // other postFilter plugins like preemption would not change anything. + // Plugins should return Unschedulable if it is possible that the pod can get scheduled + // after running other postFilter plugins. // The accompanying status message should explain why the pod is unschedulable. UnschedulableAndUnresolvable // Wait is used when a Permit plugin finds a pod scheduling should wait. Wait - // Skip is used when a Bind plugin chooses to skip binding. + // Skip is used in the following scenarios: + // - when a Bind plugin chooses to skip binding. + // - when a PreFilter plugin returns Skip so that coupled Filter plugin/PreFilterExtensions() will be skipped. Skip ) // This list should be exactly the same as the codes iota defined above in the same order. var codes = []string{"Success", "Error", "Unschedulable", "UnschedulableAndUnresolvable", "Wait", "Skip"} -// statusPrecedence defines a map from status to its precedence, larger value means higher precedent. -var statusPrecedence = map[Code]int{ - Error: 3, - UnschedulableAndUnresolvable: 2, - Unschedulable: 1, - // Any other statuses we know today, `Skip` or `Wait`, will take precedence over `Success`. - Success: -1, -} - func (c Code) String() string { return codes[c] } @@ -159,6 +153,11 @@ type Status struct { failedPlugin string } +func (s *Status) WithError(err error) *Status { + s.err = err + return s +} + // Code returns code of the Status. func (s *Status) Code() Code { if s == nil { @@ -172,7 +171,7 @@ func (s *Status) Message() string { if s == nil { return "" } - return strings.Join(s.reasons, ", ") + return strings.Join(s.Reasons(), ", ") } // SetFailedPlugin sets the given plugin name to s.failedPlugin. @@ -194,6 +193,9 @@ func (s *Status) FailedPlugin() string { // Reasons returns reasons of the Status. func (s *Status) Reasons() []string { + if s.err != nil { + return append([]string{s.err.Error()}, s.reasons...) 
+ } return s.reasons } @@ -244,10 +246,13 @@ func (s *Status) Equal(x *Status) bool { if s.code != x.code { return false } - if s.code == Error { - return cmp.Equal(s.err, x.err, cmpopts.EquateErrors()) + if !cmp.Equal(s.err, x.err, cmpopts.EquateErrors()) { + return false + } + if !cmp.Equal(s.reasons, x.reasons) { + return false } - return cmp.Equal(s.reasons, x.reasons) + return cmp.Equal(s.failedPlugin, x.failedPlugin) } // NewStatus makes a Status out of the given arguments and returns its pointer. @@ -256,9 +261,6 @@ func NewStatus(code Code, reasons ...string) *Status { code: code, reasons: reasons, } - if code == Error { - s.err = errors.New(s.Message()) - } return s } @@ -268,42 +270,11 @@ func AsStatus(err error) *Status { return nil } return &Status{ - code: Error, - reasons: []string{err.Error()}, - err: err, + code: Error, + err: err, } } -// PluginToStatus maps plugin name to status. Currently used to identify which Filter plugin -// returned which status. -type PluginToStatus map[string]*Status - -// Merge merges the statuses in the map into one. The resulting status code have the following -// precedence: Error, UnschedulableAndUnresolvable, Unschedulable. -func (p PluginToStatus) Merge() *Status { - if len(p) == 0 { - return nil - } - - finalStatus := NewStatus(Success) - for _, s := range p { - if s.Code() == Error { - finalStatus.err = s.AsError() - } - if statusPrecedence[s.Code()] > statusPrecedence[finalStatus.code] { - finalStatus.code = s.Code() - // Same as code, we keep the most relevant failedPlugin in the returned Status. - finalStatus.failedPlugin = s.FailedPlugin() - } - - for _, r := range s.reasons { - finalStatus.AppendReason(r) - } - } - - return finalStatus -} - // WaitingPod represents a pod currently waiting in the permit phase. type WaitingPod interface { // GetPod returns a reference to the waiting pod. @@ -379,6 +350,8 @@ type PreFilterPlugin interface { // plugins must return success or the pod will be rejected. PreFilter could optionally // return a PreFilterResult to influence which nodes to evaluate downstream. This is useful // for cases where it is possible to determine the subset of nodes to process in O(1) time. + // When it returns Skip status, returned PreFilterResult and other fields in status are just ignored, + // and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle. PreFilter(ctx context.Context, state *CycleState, p *v1.Pod) (*PreFilterResult, *Status) // PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one, // or nil if it does not. A Pre-filter plugin can provide extensions to incrementally @@ -753,7 +726,7 @@ type PluginsRunner interface { // preemption, we may pass a copy of the original nodeInfo object that has some pods // removed from it to evaluate the possibility of preempting them to // schedule the target pod. - RunFilterPlugins(context.Context, *CycleState, *v1.Pod, *NodeInfo) PluginToStatus + RunFilterPlugins(context.Context, *CycleState, *v1.Pod, *NodeInfo) *Status // RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured // PreFilter plugins. It returns directly if any of the plugins return any // status other than Success. 
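// Illustrative sketch (editorial assumption, not part of the patch above): the reworked
// Status keeps the wrapped error separate from the reasons list instead of flattening it
// at construction time. Reasons() and Message() now surface err.Error() first, AsError()
// still returns the original wrapped error, and Equal() also compares the failed plugin.
package example

import (
	"errors"
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/framework"
)

func statusExample() {
	err := errors.New("node not found")
	s := framework.AsStatus(fmt.Errorf("running %q filter plugin: %w", "Foo", err))
	s.SetFailedPlugin("Foo")

	fmt.Println(s.Code())                    // Error
	fmt.Println(s.Reasons())                 // [running "Foo" filter plugin: node not found]
	fmt.Println(s.Message())                 // running "Foo" filter plugin: node not found
	fmt.Println(errors.Is(s.AsError(), err)) // true: the wrapped error is preserved, not stringified
}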
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go index 91fc9ef9ed00..711b54bf37db 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go @@ -49,6 +49,7 @@ const Name = names.DefaultPreemption // DefaultPreemption is a PostFilter plugin implements the preemption logic. type DefaultPreemption struct { fh framework.Handle + fts feature.Features args config.DefaultPreemptionArgs podLister corelisters.PodLister pdbLister policylisters.PodDisruptionBudgetLister @@ -72,6 +73,7 @@ func New(dpArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram } pl := DefaultPreemption{ fh: fh, + fts: fts, args: *args, podLister: fh.SharedInformerFactory().Core().V1().Pods().Lister(), pdbLister: getPDBLister(fh.SharedInformerFactory()), @@ -228,15 +230,16 @@ func (pl *DefaultPreemption) SelectVictimsOnNode( // PodEligibleToPreemptOthers returns one bool and one string. The bool // indicates whether this pod should be considered for preempting other pods or // not. The string includes the reason if this pod isn't eligible. -// If this pod has a preemptionPolicy of Never or has already preempted other -// pods and those are in their graceful termination period, it shouldn't be -// considered for preemption. -// We look at the node that is nominated for this pod and as long as there are -// terminating pods on the node, we don't consider this for preempting more pods. +// There're several reasons: +// 1. The pod has a preemptionPolicy of Never. +// 2. The pod has already preempted other pods and the victims are in their graceful termination period. +// Currently we check the node that is nominated for this pod, and as long as there are +// terminating pods on this node, we don't attempt to preempt more pods. func (pl *DefaultPreemption) PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNodeStatus *framework.Status) (bool, string) { if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever { - return false, fmt.Sprint("not eligible due to preemptionPolicy=Never.") + return false, "not eligible due to preemptionPolicy=Never." } + nodeInfos := pl.fh.SnapshotSharedLister().NodeInfos() nomNodeName := pod.Status.NominatedNodeName if len(nomNodeName) > 0 { @@ -249,9 +252,9 @@ func (pl *DefaultPreemption) PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNo if nodeInfo, _ := nodeInfos.Get(nomNodeName); nodeInfo != nil { podPriority := corev1helpers.PodPriority(pod) for _, p := range nodeInfo.Pods { - if p.Pod.DeletionTimestamp != nil && corev1helpers.PodPriority(p.Pod) < podPriority { + if corev1helpers.PodPriority(p.Pod) < podPriority && podTerminatingByPreemption(p.Pod, pl.fts.EnablePodDisruptionConditions) { // There is a terminating pod on the nominated node. - return false, fmt.Sprint("not eligible due to a terminating pod on the nominated node.") + return false, "not eligible due to a terminating pod on the nominated node." } } } @@ -259,6 +262,25 @@ func (pl *DefaultPreemption) PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNo return true, "" } +// podTerminatingByPreemption returns the pod's terminating state if feature PodDisruptionConditions is not enabled. 
+// Otherwise, it additionally checks if the termination state is caused by scheduler preemption. +func podTerminatingByPreemption(p *v1.Pod, enablePodDisruptionConditions bool) bool { + if p.DeletionTimestamp == nil { + return false + } + + if !enablePodDisruptionConditions { + return true + } + + for _, condition := range p.Status.Conditions { + if condition.Type == v1.DisruptionTarget { + return condition.Status == v1.ConditionTrue && condition.Reason == v1.PodReasonPreemptionByScheduler + } + } + return false +} + // filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods" // and "nonViolatingPods" based on whether their PDBs will be violated if they are // preempted. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go index 7d3c9f9f97ae..09b5e1af06f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go @@ -320,7 +320,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl if err != nil { return nil, statusUnschedulable(logger, err.Error()) } - logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjs(claims)) + logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claims)) // If the pod does not reference any claim, we don't need to do // anything for it. if len(claims) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go index e0790a681cae..04771b0d0f41 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go @@ -27,4 +27,5 @@ type Features struct { EnableNodeInclusionPolicyInPodTopologySpread bool EnableMatchLabelKeysInPodTopologySpread bool EnablePodSchedulingReadiness bool + EnablePodDisruptionConditions bool } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go index b84fc8a788b6..02a7c7926930 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go @@ -150,8 +150,8 @@ func podMatchesAllAffinityTerms(terms []framework.AffinityTerm, pod *v1.Pod) boo } // calculates the following for each existing pod on each node: -// (1) Whether it has PodAntiAffinity -// (2) Whether any AffinityTerm matches the incoming pod +// 1. Whether it has PodAntiAffinity +// 2. 
Whether any AntiAffinityTerm matches the incoming pod func (pl *InterPodAffinity) getExistingAntiAffinityCounts(ctx context.Context, pod *v1.Pod, nsLabels labels.Set, nodes []*framework.NodeInfo) topologyToMatchedTermCount { topoMaps := make([]topologyToMatchedTermCount, len(nodes)) index := int32(-1) @@ -259,6 +259,10 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework s.existingAntiAffinityCounts = pl.getExistingAntiAffinityCounts(ctx, pod, s.namespaceLabels, nodesWithRequiredAntiAffinityPods) s.affinityCounts, s.antiAffinityCounts = pl.getIncomingAffinityAntiAffinityCounts(ctx, s.podInfo, allNodes) + if len(s.existingAntiAffinityCounts) == 0 && len(s.podInfo.RequiredAffinityTerms) == 0 && len(s.podInfo.RequiredAntiAffinityTerms) == 0 { + return nil, framework.NewStatus(framework.Skip) + } + cycleState.Write(preFilterStateKey, s) return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go index 322ea8543782..fd4cc24327df 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go @@ -142,6 +142,15 @@ func (pl *InterPodAffinity) PreScore( hasPreferredAffinityConstraints := affinity != nil && affinity.PodAffinity != nil && len(affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 hasPreferredAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 + // Optionally ignore calculating preferences of existing pods' affinity rules + // if the incoming pod has no inter-pod affinities. + if pl.args.IgnorePreferredTermsOfExistingPods && !hasPreferredAffinityConstraints && !hasPreferredAntiAffinityConstraints { + cycleState.Write(preScoreStateKey, &preScoreState{ + topologyScore: make(map[string]map[string]int64), + }) + return nil + } + // Unless the pod being scheduled has preferred affinity terms, we only // need to process nodes hosting pods with affinity. var allNodes []*framework.NodeInfo diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go index 41e0a27d708d..841da141b58f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -89,13 +89,19 @@ func (pl *NodeAffinity) EventsToRegister() []framework.ClusterEvent { // PreFilter builds and writes cycle state used by Filter. 
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - state := &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)} - cycleState.Write(preFilterStateKey, state) affinity := pod.Spec.Affinity - if affinity == nil || + noNodeAffinity := (affinity == nil || affinity.NodeAffinity == nil || - affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil || - len(affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { + affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil) + if noNodeAffinity && pl.addedNodeSelector == nil && pod.Spec.NodeSelector == nil { + // NodeAffinity Filter has nothing to do with the Pod. + return nil, framework.NewStatus(framework.Skip) + } + + state := &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)} + cycleState.Write(preFilterStateKey, state) + + if noNodeAffinity || len(affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go index baad87699ac6..4c0438bbc3da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go @@ -78,28 +78,25 @@ func NewBalancedAllocation(baArgs runtime.Object, h framework.Handle, fts featur return nil, err } - resToWeightMap := make(resourceToWeightMap) - - for _, resource := range args.Resources { - resToWeightMap[v1.ResourceName(resource.Name)] = resource.Weight - } - return &BalancedAllocation{ handle: h, resourceAllocationScorer: resourceAllocationScorer{ - Name: BalancedAllocationName, - scorer: balancedResourceScorer, - useRequested: true, - resourceToWeightMap: resToWeightMap, + Name: BalancedAllocationName, + scorer: balancedResourceScorer, + useRequested: true, + resources: args.Resources, }, }, nil } -func balancedResourceScorer(requested, allocable resourceToValueMap) int64 { +func balancedResourceScorer(requested, allocable []int64) int64 { var resourceToFractions []float64 var totalFraction float64 - for name, value := range requested { - fraction := float64(value) / float64(allocable[name]) + for i := range requested { + if allocable[i] == 0 { + continue + } + fraction := float64(requested[i]) / float64(allocable[i]) if fraction > 1 { fraction = 1 } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go index ff62eff0041c..27cb3233eef7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -49,27 +49,27 @@ const ( // nodeResourceStrategyTypeMap maps strategy to scorer implementation var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{ config.LeastAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resToWeightMap := 
resourcesToWeightMap(args.ScoringStrategy.Resources) + resources := args.ScoringStrategy.Resources return &resourceAllocationScorer{ - Name: string(config.LeastAllocated), - scorer: leastResourceScorer(resToWeightMap), - resourceToWeightMap: resToWeightMap, + Name: string(config.LeastAllocated), + scorer: leastResourceScorer(resources), + resources: resources, } }, config.MostAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resToWeightMap := resourcesToWeightMap(args.ScoringStrategy.Resources) + resources := args.ScoringStrategy.Resources return &resourceAllocationScorer{ - Name: string(config.MostAllocated), - scorer: mostResourceScorer(resToWeightMap), - resourceToWeightMap: resToWeightMap, + Name: string(config.MostAllocated), + scorer: mostResourceScorer(resources), + resources: resources, } }, config.RequestedToCapacityRatio: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resToWeightMap := resourcesToWeightMap(args.ScoringStrategy.Resources) + resources := args.ScoringStrategy.Resources return &resourceAllocationScorer{ - Name: string(config.RequestedToCapacityRatio), - scorer: requestedToCapacityRatioScorer(resToWeightMap, args.ScoringStrategy.RequestedToCapacityRatio.Shape), - resourceToWeightMap: resToWeightMap, + Name: string(config.RequestedToCapacityRatio), + scorer: requestedToCapacityRatioScorer(resources, args.ScoringStrategy.RequestedToCapacityRatio.Shape), + resources: resources, } }, } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go index 1abf97bc678a..f571bcbc2a8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go @@ -17,6 +17,7 @@ limitations under the License. 
package noderesources import ( + "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" ) @@ -26,12 +27,15 @@ import ( // // Details: // (cpu((capacity-requested)*MaxNodeScore*cpuWeight/capacity) + memory((capacity-requested)*MaxNodeScore*memoryWeight/capacity) + ...)/weightSum -func leastResourceScorer(resToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap) int64 { - return func(requested, allocable resourceToValueMap) int64 { +func leastResourceScorer(resources []config.ResourceSpec) func([]int64, []int64) int64 { + return func(requested, allocable []int64) int64 { var nodeScore, weightSum int64 - for resource := range requested { - weight := resToWeightMap[resource] - resourceScore := leastRequestedScore(requested[resource], allocable[resource]) + for i := range requested { + if allocable[i] == 0 { + continue + } + weight := resources[i].Weight + resourceScore := leastRequestedScore(requested[i], allocable[i]) nodeScore += resourceScore * weight weightSum += weight } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go index 94c55cbe3255..41425698835f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go @@ -17,6 +17,7 @@ limitations under the License. package noderesources import ( + "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" ) @@ -26,12 +27,15 @@ import ( // // Details: // (cpu(MaxNodeScore * requested * cpuWeight / capacity) + memory(MaxNodeScore * requested * memoryWeight / capacity) + ...) / weightSum -func mostResourceScorer(resToWeightMap resourceToWeightMap) func(requested, allocable resourceToValueMap) int64 { - return func(requested, allocable resourceToValueMap) int64 { +func mostResourceScorer(resources []config.ResourceSpec) func(requested, allocable []int64) int64 { + return func(requested, allocable []int64) int64 { var nodeScore, weightSum int64 - for resource := range requested { - weight := resToWeightMap[resource] - resourceScore := mostRequestedScore(requested[resource], allocable[resource]) + for i := range requested { + if allocable[i] == 0 { + continue + } + weight := resources[i].Weight + resourceScore := mostRequestedScore(requested[i], allocable[i]) nodeScore += resourceScore * weight weightSum += weight } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go index 36cd9f1e623f..df5e2ff4733e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go @@ -28,7 +28,7 @@ const maxUtilization = 100 // buildRequestedToCapacityRatioScorerFunction allows users to apply bin packing // on core resources like CPU, Memory as well as extended resources like accelerators. 
-func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.FunctionShape, resourceToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap) int64 { +func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.FunctionShape, resources []config.ResourceSpec) func([]int64, []int64) int64 { rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape) resourceScoringFunction := func(requested, capacity int64) int64 { if capacity == 0 || requested > capacity { @@ -37,11 +37,14 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.Fun return rawScoringFunction(requested * maxUtilization / capacity) } - return func(requested, allocable resourceToValueMap) int64 { + return func(requested, allocable []int64) int64 { var nodeScore, weightSum int64 - for resource := range requested { - weight := resourceToWeightMap[resource] - resourceScore := resourceScoringFunction(requested[resource], allocable[resource]) + for i := range requested { + if allocable[i] == 0 { + continue + } + weight := resources[i].Weight + resourceScore := resourceScoringFunction(requested[i], allocable[i]) if resourceScore > 0 { nodeScore += resourceScore * weight weightSum += weight @@ -54,7 +57,7 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.Fun } } -func requestedToCapacityRatioScorer(weightMap resourceToWeightMap, shape []config.UtilizationShapePoint) func(resourceToValueMap, resourceToValueMap) int64 { +func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []config.UtilizationShapePoint) func([]int64, []int64) int64 { shapes := make([]helper.FunctionShapePoint, 0, len(shape)) for _, point := range shape { shapes = append(shapes, helper.FunctionShapePoint{ @@ -66,5 +69,5 @@ func requestedToCapacityRatioScorer(weightMap resourceToWeightMap, shape []confi }) } - return buildRequestedToCapacityRatioScorerFunction(shapes, weightMap) + return buildRequestedToCapacityRatioScorerFunction(shapes, resources) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index 6715ccffab20..af3e8b15ca9b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -24,9 +24,6 @@ import ( schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) -// resourceToWeightMap contains resource name and weight. -type resourceToWeightMap map[v1.ResourceName]int64 - // scorer is decorator for resourceAllocationScorer type scorer func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer @@ -35,14 +32,11 @@ type resourceAllocationScorer struct { Name string // used to decide whether to use Requested or NonZeroRequested for // cpu and memory. - useRequested bool - scorer func(requested, allocable resourceToValueMap) int64 - resourceToWeightMap resourceToWeightMap + useRequested bool + scorer func(requested, allocable []int64) int64 + resources []config.ResourceSpec } -// resourceToValueMap is keyed with resource name and valued with quantity. -type resourceToValueMap map[v1.ResourceName]int64 - // score will use `scorer` function to calculate the score. 
func (r *resourceAllocationScorer) score( pod *v1.Pod, @@ -51,26 +45,31 @@ func (r *resourceAllocationScorer) score( if node == nil { return 0, framework.NewStatus(framework.Error, "node not found") } - if r.resourceToWeightMap == nil { + // resources not set, nothing scheduled, + if len(r.resources) == 0 { return 0, framework.NewStatus(framework.Error, "resources not found") } - requested := make(resourceToValueMap) - allocatable := make(resourceToValueMap) - for resource := range r.resourceToWeightMap { - alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, pod, resource) - if alloc != 0 { - // Only fill the extended resource entry when it's non-zero. - allocatable[resource], requested[resource] = alloc, req + requested := make([]int64, len(r.resources)) + allocatable := make([]int64, len(r.resources)) + for i := range r.resources { + alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, pod, v1.ResourceName(r.resources[i].Name)) + // Only fill the extended resource entry when it's non-zero. + if alloc == 0 { + continue } + allocatable[i] = alloc + requested[i] = req } score := r.scorer(requested, allocatable) - klog.V(10).InfoS("Listing internal info for allocatable resources, requested resources and score", "pod", - klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name, - "allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score, - ) + if klogV := klog.V(10); klogV.Enabled() { // Serializing these maps is costly. + klogV.InfoS("Listing internal info for allocatable resources, requested resources and score", "pod", + klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name, + "allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score, + ) + } return score, nil } @@ -135,12 +134,3 @@ func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, reso return podRequest } - -// resourcesToWeightMap make weightmap from resources spec -func resourcesToWeightMap(resources []config.ResourceSpec) resourceToWeightMap { - resourceToWeightMap := make(resourceToWeightMap) - for _, resource := range resources { - resourceToWeightMap[v1.ResourceName(resource.Name)] = resource.Weight - } - return resourceToWeightMap -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go index 5769aee6e121..a3a9a0f13878 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go @@ -54,6 +54,7 @@ func NewInTreeRegistry() runtime.Registry { EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread), EnableMatchLabelKeysInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread), EnablePodSchedulingReadiness: feature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness), + EnablePodDisruptionConditions: feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions), } registry := runtime.Registry{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go index a4be0899ad06..ad830d8ee6ac 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -45,6 +45,7 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics" + "k8s.io/kubernetes/pkg/volume/util" ) // ConflictReason is used for the special strings which explain why @@ -126,6 +127,8 @@ type InTreeToCSITranslator interface { // 1. The scheduler takes a Pod off the scheduler queue and processes it serially: // a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked // here, pod volume information will be saved in current scheduling cycle state for later use. +// If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce +// down the list of eligible nodes based on the bound PV's NodeAffinity (if any). // b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here. // c. Invokes all score plugins. Future/TBD // d. Selects the best node for the Pod. @@ -144,9 +147,17 @@ type InTreeToCSITranslator interface { // 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue // while the actual binding operation occurs in the background. type SchedulerVolumeBinder interface { - // GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning) - // and unbound with immediate binding (including prebound) - GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) + // GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning), + // unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. + GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) + + // GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be + // potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used + // in subsequent scheduling stages. + // + // If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made + // and all nodes should be considered. + GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the // node and returns pod's volumes information. @@ -161,7 +172,7 @@ type SchedulerVolumeBinder interface { // for volumes that still need to be created. // // This function is called by the scheduler VolumeBinding plugin and can be called in parallel - FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) + FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) // AssumePodVolumes will: // 1. Take the PV matches for unbound PVCs and update the PV cache assuming @@ -188,6 +199,17 @@ type SchedulerVolumeBinder interface { BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error } +type PodVolumeClaims struct { + // boundClaims are the pod's bound PVCs. 
+ boundClaims []*v1.PersistentVolumeClaim + // unboundClaimsDelayBinding are the pod's unbound with delayed binding (including provisioning) PVCs. + unboundClaimsDelayBinding []*v1.PersistentVolumeClaim + // unboundClaimsImmediate are the pod's unbound with immediate binding PVCs (i.e., supposed to be bound already) . + unboundClaimsImmediate []*v1.PersistentVolumeClaim + // unboundVolumesDelayBinding are PVs that belong to storage classes of the pod's unbound PVCs with delayed binding. + unboundVolumesDelayBinding map[string][]*v1.PersistentVolume +} + type volumeBinder struct { kubeClient clientset.Interface @@ -208,6 +230,8 @@ type volumeBinder struct { csiStorageCapacityLister storagelisters.CSIStorageCapacityLister } +var _ SchedulerVolumeBinder = &volumeBinder{} + // CapacityCheck contains additional parameters for NewVolumeBinder that // are only needed when checking volume sizes against available storage // capacity is desired. @@ -248,9 +272,9 @@ func NewVolumeBinder( } // FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs -// for the given pod and node. If the node does not fit, confilict reasons are +// for the given pod and node. If the node does not fit, conflict reasons are // returned. -func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { +func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { podVolumes = &PodVolumes{} // Warning: Below log needs high verbosity as it can be printed several times (#60933). @@ -305,22 +329,22 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []* }() // Check PV node affinity on bound volumes - if len(boundClaims) > 0 { - boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod) + if len(podVolumeClaims.boundClaims) > 0 { + boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(podVolumeClaims.boundClaims, node, pod) if err != nil { return } } // Find matching volumes and node for unbound claims - if len(claimsToBind) > 0 { + if len(podVolumeClaims.unboundClaimsDelayBinding) > 0 { var ( claimsToFindMatching []*v1.PersistentVolumeClaim claimsToProvision []*v1.PersistentVolumeClaim ) // Filter out claims to provision - for _, claim := range claimsToBind { + for _, claim := range podVolumeClaims.unboundClaimsDelayBinding { if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok { if selectedNode != node.Name { // Fast path, skip unmatched node. @@ -336,7 +360,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []* // Find matching volumes if len(claimsToFindMatching) > 0 { var unboundClaims []*v1.PersistentVolumeClaim - unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) + unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node) if err != nil { return } @@ -356,6 +380,55 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []* return } +// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be +// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used +// in subsequent scheduling stages. 
+// +// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes +// should be considered. +func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) { + if len(boundClaims) == 0 { + return + } + + var errs []error + for _, pvc := range boundClaims { + pvName := pvc.Spec.VolumeName + pv, err := b.pvCache.GetPV(pvName) + if err != nil { + errs = append(errs, err) + continue + } + + // if the PersistentVolume is local and has node affinity matching specific node(s), + // add them to the eligible nodes + nodeNames := util.GetLocalPersistentVolumeNodeNames(pv) + if len(nodeNames) != 0 { + // on the first found list of eligible nodes for the local PersistentVolume, + // insert to the eligible node set. + if eligibleNodes == nil { + eligibleNodes = sets.NewString(nodeNames...) + } else { + // for subsequent finding of eligible nodes for the local PersistentVolume, + // take the intersection of the nodes with the existing eligible nodes + // for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2, + // then the eligible node list should be empty. + eligibleNodes = eligibleNodes.Intersection(sets.NewString(nodeNames...)) + } + } + } + + if len(errs) > 0 { + klog.V(4).InfoS("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs) + return nil + } + + if eligibleNodes != nil { + klog.V(4).InfoS("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes) + } + return +} + // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's // volume information for the chosen node, and: // 1. Update the pvCache with the new prebound PV. @@ -742,40 +815,49 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { return true } -// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning) -// and unbound with immediate binding (including prebound) -func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { - boundClaims = []*v1.PersistentVolumeClaim{} - unboundClaimsImmediate = []*v1.PersistentVolumeClaim{} - unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{} +// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning), +// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. 
+func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { + podVolumeClaims = &PodVolumeClaims{ + boundClaims: []*v1.PersistentVolumeClaim{}, + unboundClaimsImmediate: []*v1.PersistentVolumeClaim{}, + unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{}, + } for _, vol := range pod.Spec.Volumes { volumeBound, pvc, err := b.isVolumeBound(pod, &vol) if err != nil { - return nil, nil, nil, err + return podVolumeClaims, err } if pvc == nil { continue } if volumeBound { - boundClaims = append(boundClaims, pvc) + podVolumeClaims.boundClaims = append(podVolumeClaims.boundClaims, pvc) } else { delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister) if err != nil { - return nil, nil, nil, err + return podVolumeClaims, err } // Prebound PVCs are treated as unbound immediate binding if delayBindingMode && pvc.Spec.VolumeName == "" { // Scheduler path - unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc) + podVolumeClaims.unboundClaimsDelayBinding = append(podVolumeClaims.unboundClaimsDelayBinding, pvc) } else { // !delayBindingMode || pvc.Spec.VolumeName != "" // Immediate binding should have already been bound - unboundClaimsImmediate = append(unboundClaimsImmediate, pvc) + podVolumeClaims.unboundClaimsImmediate = append(podVolumeClaims.unboundClaimsImmediate, pvc) } } } - return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil + + podVolumeClaims.unboundVolumesDelayBinding = map[string][]*v1.PersistentVolume{} + for _, pvc := range podVolumeClaims.unboundClaimsDelayBinding { + // Get storage class name from each PVC + storageClassName := volume.GetPersistentVolumeClaimClass(pvc) + podVolumeClaims.unboundVolumesDelayBinding[storageClassName] = b.pvCache.ListPVs(storageClassName) + } + return podVolumeClaims, nil } func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) { @@ -814,7 +896,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node // findMatchingVolumes tries to find matching volumes for given claims, // and return unbound claims for further provision. 
-func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) @@ -825,10 +907,10 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi for _, pvc := range claimsToBind { // Get storage class name from each PVC storageClassName := volume.GetPersistentVolumeClaimClass(pvc) - allPVs := b.pvCache.ListPVs(storageClassName) + pvs := unboundVolumesDelayBinding[storageClassName] // Find a matching PV - pv, err := volume.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true) + pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true) if err != nil { return false, nil, nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go index ef28891f2886..c77c9733789a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go @@ -20,6 +20,7 @@ import ( "context" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" ) // FakeVolumeBinderConfig holds configurations for fake volume binder. @@ -46,13 +47,20 @@ type FakeVolumeBinder struct { BindCalled bool } -// GetPodVolumes implements SchedulerVolumeBinder.GetPodVolumes. -func (b *FakeVolumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { - return nil, nil, nil, nil +var _ SchedulerVolumeBinder = &FakeVolumeBinder{} + +// GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumes. +func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { + return &PodVolumeClaims{}, nil +} + +// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes. +func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) { + return nil } // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes. 
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _, _ []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { +func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { return nil, b.config.FindReasons, b.config.FindErr } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index 3983064cb720..3d251ba4a55d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -47,14 +47,12 @@ const ( // framework.CycleState, in the later phases we don't need to call Write method // to update the value type stateData struct { - skip bool // set true if pod does not have PVCs - boundClaims []*v1.PersistentVolumeClaim - claimsToBind []*v1.PersistentVolumeClaim - allBound bool + allBound bool // podVolumesByNode holds the pod's volume information found in the Filter // phase for each node // it's initialized in the PreFilter phase podVolumesByNode map[string]*PodVolumes + podVolumeClaims *PodVolumeClaims sync.Mutex } @@ -167,14 +165,14 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt if hasPVC, err := pl.podHasPVCs(pod); err != nil { return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error()) } else if !hasPVC { - state.Write(stateKey, &stateData{skip: true}) - return nil, nil + state.Write(stateKey, &stateData{}) + return nil, framework.NewStatus(framework.Skip) } - boundClaims, claimsToBind, unboundClaimsImmediate, err := pl.Binder.GetPodVolumes(pod) + podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(pod) if err != nil { return nil, framework.AsStatus(err) } - if len(unboundClaimsImmediate) > 0 { + if len(podVolumeClaims.unboundClaimsImmediate) > 0 { // Return UnschedulableAndUnresolvable error if immediate claims are // not bound. Pod will be moved to active/backoff queues once these // claims are bound by PV controller. @@ -182,8 +180,23 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt status.AppendReason("pod has unbound immediate PersistentVolumeClaims") return nil, status } - state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*PodVolumes)}) - return nil, nil + // Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims. + var result *framework.PreFilterResult + if eligibleNodes := pl.Binder.GetEligibleNodes(podVolumeClaims.boundClaims); eligibleNodes != nil { + result = &framework.PreFilterResult{ + NodeNames: eligibleNodes, + } + } + + state.Write(stateKey, &stateData{ + podVolumesByNode: make(map[string]*PodVolumes), + podVolumeClaims: &PodVolumeClaims{ + boundClaims: podVolumeClaims.boundClaims, + unboundClaimsDelayBinding: podVolumeClaims.unboundClaimsDelayBinding, + unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding, + }, + }) + return result, nil } // PreFilterExtensions returns prefilter extensions, pod add and remove. 
@@ -229,11 +242,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p return framework.AsStatus(err) } - if state.skip { - return nil - } - - podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node) + podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.podVolumeClaims, node) if err != nil { return framework.AsStatus(err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go index 1bfab85fc868..f3f0f5dfe348 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go @@ -18,11 +18,12 @@ package volumezone import ( "context" + "errors" "fmt" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" corelisters "k8s.io/client-go/listers/core/v1" @@ -42,67 +43,71 @@ type VolumeZone struct { } var _ framework.FilterPlugin = &VolumeZone{} +var _ framework.PreFilterPlugin = &VolumeZone{} var _ framework.EnqueueExtensions = &VolumeZone{} const ( // Name is the name of the plugin used in the plugin registry and configurations. Name = names.VolumeZone + preFilterStateKey framework.StateKey = "PreFilter" + Name + // ErrReasonConflict is used for NoVolumeZoneConflict predicate error. ErrReasonConflict = "node(s) had no available volume zone" ) -var volumeZoneLabels = sets.NewString( +// pvTopology holds the value of a pv's topologyLabel +type pvTopology struct { + pvName string + key string + values sets.String +} + +// the state is initialized in PreFilter phase. because we save the pointer in +// framework.CycleState, in the later phases we don't need to call Write method +// to update the value +type stateData struct { + // podPVTopologies holds the pv information we need + // it's initialized in the PreFilter phase + podPVTopologies []pvTopology +} + +func (d *stateData) Clone() framework.StateData { + return d +} + +var topologyLabels = []string{ v1.LabelFailureDomainBetaZone, v1.LabelFailureDomainBetaRegion, v1.LabelTopologyZone, v1.LabelTopologyRegion, -) +} // Name returns name of the plugin. It is used in logs, etc. func (pl *VolumeZone) Name() string { return Name } -// Filter invoked at the filter extension point. -// -// It evaluates if a pod can fit due to the volumes it requests, given -// that some volumes may have zone scheduling constraints. The requirement is that any -// volume zone-labels must match the equivalent zone-labels on the node. It is OK for -// the node to have more zone-label constraints (for example, a hypothetical replicated -// volume might allow region-wide access) +// PreFilter invoked at the prefilter extension point // -// Currently this is only supported with PersistentVolumeClaims, and looks to the labels -// only on the bound PersistentVolume. +// # It finds the topology of the PersistentVolumes corresponding to the volumes a pod requests // -// Working with volumes declared inline in the pod specification (i.e. 
not -// using a PersistentVolume) is likely to be harder, as it would require -// determining the zone of a volume during scheduling, and that is likely to -// require calling out to the cloud provider. It seems that we are moving away -// from inline volume declarations anyway. -func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - // If a pod doesn't have any volume attached to it, the predicate will always be true. - // Thus we make a fast path for it, to avoid unnecessary computations in this case. - if len(pod.Spec.Volumes) == 0 { - return nil +// Currently, this is only supported with PersistentVolumeClaims, +// and only looks for the bound PersistentVolume. +func (pl *VolumeZone) PreFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + podPVTopologies, status := pl.getPVbyPod(ctx, pod) + if !status.IsSuccess() { + return nil, status } - node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.Error, "node not found") - } - nodeConstraints := make(map[string]string) - for k, v := range node.ObjectMeta.Labels { - if !volumeZoneLabels.Has(k) { - continue - } - nodeConstraints[k] = v - } - if len(nodeConstraints) == 0 { - // The node has no zone constraints, so we're OK to schedule. - // In practice, when using zones, all nodes must be labeled with zone labels. - // We want to fast-path this case though. - return nil + if len(podPVTopologies) == 0 { + return nil, framework.NewStatus(framework.Skip) } + cs.Write(preFilterStateKey, &stateData{podPVTopologies: podPVTopologies}) + return nil, nil +} + +func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopology, *framework.Status) { + podPVTopologies := make([]pvTopology, 0) for i := range pod.Spec.Volumes { volume := pod.Spec.Volumes[i] @@ -111,63 +116,139 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod * } pvcName := volume.PersistentVolumeClaim.ClaimName if pvcName == "" { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no name") + return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no name") } pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName) if s := getErrorAsStatus(err); !s.IsSuccess() { - return s + return nil, s } pvName := pvc.Spec.VolumeName if pvName == "" { scName := storagehelpers.GetPersistentVolumeClaimClass(pvc) if len(scName) == 0 { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no pv name and storageClass name") + return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no pv name and storageClass name") } class, err := pl.scLister.Get(scName) if s := getErrorAsStatus(err); !s.IsSuccess() { - return s + return nil, s } if class.VolumeBindingMode == nil { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName)) + return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName)) } if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer { // Skip unbound volumes continue } - return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolume had no name") + return nil, 
framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolume had no name") } pv, err := pl.pvLister.Get(pvName) if s := getErrorAsStatus(err); !s.IsSuccess() { - return s + return nil, s } - for k, v := range pv.ObjectMeta.Labels { - if !volumeZoneLabels.Has(k) { - continue - } - nodeV := nodeConstraints[k] - volumeVSet, err := volumehelpers.LabelZonesToSet(v) - if err != nil { - klog.InfoS("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", k, v), "err", err) - continue + for _, key := range topologyLabels { + if value, ok := pv.ObjectMeta.Labels[key]; ok { + volumeVSet, err := volumehelpers.LabelZonesToSet(value) + if err != nil { + klog.InfoS("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err) + continue + } + podPVTopologies = append(podPVTopologies, pvTopology{ + pvName: pv.Name, + key: key, + values: volumeVSet, + }) } + } + } + return podPVTopologies, nil +} - if !volumeVSet.Has(nodeV) { - klog.V(10).InfoS("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvName), "PVLabelKey", k) - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonConflict) - } +// PreFilterExtensions returns prefilter extensions, pod add and remove. +func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions { + return nil +} + +// Filter invoked at the filter extension point. +// +// It evaluates if a pod can fit due to the volumes it requests, given +// that some volumes may have zone scheduling constraints. The requirement is that any +// volume zone-labels must match the equivalent zone-labels on the node. It is OK for +// the node to have more zone-label constraints (for example, a hypothetical replicated +// volume might allow region-wide access) +// +// Currently this is only supported with PersistentVolumeClaims, and looks to the labels +// only on the bound PersistentVolume. +// +// Working with volumes declared inline in the pod specification (i.e. not +// using a PersistentVolume) is likely to be harder, as it would require +// determining the zone of a volume during scheduling, and that is likely to +// require calling out to the cloud provider. It seems that we are moving away +// from inline volume declarations anyway. +func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { + // If a pod doesn't have any volume attached to it, the predicate will always be true. + // Thus we make a fast path for it, to avoid unnecessary computations in this case. + if len(pod.Spec.Volumes) == 0 { + return nil + } + var podPVTopologies []pvTopology + state, err := getStateData(cs) + if err != nil { + // Fallback to calculate pv list here + var status *framework.Status + podPVTopologies, status = pl.getPVbyPod(ctx, pod) + if !status.IsSuccess() { + return status } + } else { + podPVTopologies = state.podPVTopologies } + + node := nodeInfo.Node() + hasAnyNodeConstraint := false + for _, topologyLabel := range topologyLabels { + if _, ok := node.Labels[topologyLabel]; ok { + hasAnyNodeConstraint = true + break + } + } + + if !hasAnyNodeConstraint { + // The node has no zone constraints, so we're OK to schedule. + // This is to handle a single-zone cluster scenario where the node may not have any topology labels. 
+ return nil + } + + for _, pvTopology := range podPVTopologies { + v, ok := node.Labels[pvTopology.key] + if !ok || !pvTopology.values.Has(v) { + klog.V(10).InfoS("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key) + return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonConflict) + } + } + return nil } +func getStateData(cs *framework.CycleState) (*stateData, error) { + state, err := cs.Read(preFilterStateKey) + if err != nil { + return nil, err + } + s, ok := state.(*stateData) + if !ok { + return nil, errors.New("unable to convert state into stateData") + } + return s, nil +} + func getErrorAsStatus(err error) *framework.Status { if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error()) } return framework.AsStatus(err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go index 32e1e1ba7434..6cdee0959788 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go @@ -361,8 +361,8 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. victimPodApply.Status.WithConditions(corev1apply.PodCondition(). WithType(v1.DisruptionTarget). WithStatus(v1.ConditionTrue). - WithReason("PreemptionByKubeScheduler"). - WithMessage(fmt.Sprintf("Kube-scheduler: preempting to accommodate a higher priority pod: %s", klog.KObj(pod))). + WithReason(v1.PodReasonPreemptionByScheduler). + WithMessage(fmt.Sprintf("%s: preempting to accommodate a higher priority pod", pod.Spec.SchedulerName)). WithLastTransitionTime(metav1.Now()), ) @@ -378,8 +378,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. return } } - fh.EventRecorder().Eventf(victim, pod, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by %v/%v on node %v", - pod.Namespace, pod.Name, c.Name()) + fh.EventRecorder().Eventf(victim, pod, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by a pod on node %v", c.Name()) } fh.Parallelizer().Until(ctx, len(c.Victims().Pods), preemptPod, ev.PluginName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go index 53e270c2ab7d..1dbce09ff11c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go @@ -599,8 +599,10 @@ func (f *frameworkImpl) QueueSortFunc() framework.LessFunc { // RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns // *Status and its code is set to non-success if any of the plugins returns -// anything but Success. If a non-success status is returned, then the scheduling -// cycle is aborted. +// anything but Success/Skip. +// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored, +// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle. 
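
A simplified sketch of the Skip bookkeeping described in the comment above and implemented just below, using hypothetical local types instead of the framework's Status and CycleState: plugins that return Skip are recorded so their coupled Filter plugins and PreFilterExtensions can be bypassed later in the same scheduling cycle.

package main

import "fmt"

type statusCode int

const (
	success statusCode = iota
	skip
	unschedulable
)

type preFilterPlugin struct {
	name string
	run  func() statusCode
}

// runPreFilter records which plugins returned Skip; any other non-success code aborts the cycle.
func runPreFilter(plugins []preFilterPlugin) (skipped map[string]bool, failed string) {
	skipped = map[string]bool{}
	for _, pl := range plugins {
		switch pl.run() {
		case skip:
			skipped[pl.name] = true // any returned result is ignored
		case success:
			// continue with the next plugin
		default:
			return nil, pl.name // abort the scheduling cycle
		}
	}
	return skipped, ""
}

func main() {
	skipped, failed := runPreFilter([]preFilterPlugin{
		{name: "VolumeZone", run: func() statusCode { return skip }},
		{name: "NodePorts", run: func() statusCode { return success }},
	})
	fmt.Println(skipped, failed) // map[VolumeZone:true]
}
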
+// If a non-success status is returned, then the scheduling cycle is aborted. func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (_ *framework.PreFilterResult, status *framework.Status) { startTime := time.Now() defer func() { @@ -608,8 +610,13 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor }() var result *framework.PreFilterResult var pluginsWithNodes []string + skipPlugins := sets.New[string]() for _, pl := range f.preFilterPlugins { r, s := f.runPreFilterPlugin(ctx, pl, state, pod) + if s.IsSkip() { + skipPlugins.Insert(pl.Name()) + continue + } if !s.IsSuccess() { s.SetFailedPlugin(pl.Name()) if s.IsUnschedulable() { @@ -628,8 +635,8 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor } return nil, framework.NewStatus(framework.Unschedulable, msg) } - } + state.SkipFilterPlugins = skipPlugins return result, nil } @@ -654,7 +661,7 @@ func (f *frameworkImpl) RunPreFilterExtensionAddPod( nodeInfo *framework.NodeInfo, ) (status *framework.Status) { for _, pl := range f.preFilterPlugins { - if pl.PreFilterExtensions() == nil { + if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { continue } status = f.runPreFilterExtensionAddPod(ctx, pl, state, podToSchedule, podInfoToAdd, nodeInfo) @@ -689,7 +696,7 @@ func (f *frameworkImpl) RunPreFilterExtensionRemovePod( nodeInfo *framework.NodeInfo, ) (status *framework.Status) { for _, pl := range f.preFilterPlugins { - if pl.PreFilterExtensions() == nil { + if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { continue } status = f.runPreFilterExtensionRemovePod(ctx, pl, state, podToSchedule, podInfoToRemove, nodeInfo) @@ -722,23 +729,23 @@ func (f *frameworkImpl) RunFilterPlugins( state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo, -) framework.PluginToStatus { - statuses := make(framework.PluginToStatus) +) *framework.Status { for _, pl := range f.filterPlugins { - pluginStatus := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo) - if !pluginStatus.IsSuccess() { - if !pluginStatus.IsUnschedulable() { + if state.SkipFilterPlugins.Has(pl.Name()) { + continue + } + if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() { + if !status.IsUnschedulable() { // Filter plugins are not supposed to return any status other than // Success or Unschedulable. - errStatus := framework.AsStatus(fmt.Errorf("running %q filter plugin: %w", pl.Name(), pluginStatus.AsError())).WithFailedPlugin(pl.Name()) - return map[string]*framework.Status{pl.Name(): errStatus} + status = framework.AsStatus(fmt.Errorf("running %q filter plugin: %w", pl.Name(), status.AsError())) } - pluginStatus.SetFailedPlugin(pl.Name()) - statuses[pl.Name()] = pluginStatus + status.SetFailedPlugin(pl.Name()) + return status } } - return statuses + return nil } func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { @@ -752,30 +759,39 @@ func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.Filter } // RunPostFilterPlugins runs the set of configured PostFilter plugins until the first -// Success or Error is met, otherwise continues to execute all plugins. +// Success, Error or UnschedulableAndUnresolvable is met; otherwise continues to execute all plugins. 
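
A rough sketch of the aggregation rule stated above and implemented below, with hypothetical local types standing in for framework.Status: stop on the first Success, Error or UnschedulableAndUnresolvable, otherwise keep collecting reasons and remember the first failed plugin for the final Unschedulable status.

package main

import "fmt"

type code int

const (
	success code = iota
	unschedulable
	unschedulableAndUnresolvable
	errorCode
)

type postFilterOutcome struct {
	plugin  string
	code    code
	reasons []string
}

// aggregate mirrors the loop below: early return on decisive codes, otherwise accumulate.
func aggregate(outcomes []postFilterOutcome) (code, string, []string) {
	var reasons []string
	var firstFailed string
	for _, o := range outcomes {
		switch o.code {
		case success, unschedulableAndUnresolvable, errorCode:
			return o.code, o.plugin, o.reasons // stop immediately
		default: // Unschedulable: keep collecting
			reasons = append(reasons, o.reasons...)
			if firstFailed == "" {
				firstFailed = o.plugin
			}
		}
	}
	return unschedulable, firstFailed, reasons
}

func main() {
	c, plugin, reasons := aggregate([]postFilterOutcome{
		{plugin: "A", code: unschedulable, reasons: []string{"no candidates"}},
		{plugin: "B", code: unschedulable, reasons: []string{"pdb violated"}},
	})
	fmt.Println(c, plugin, reasons) // 1 A [no candidates pdb violated]
}
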
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusMap) (_ *framework.PostFilterResult, status *framework.Status) { startTime := time.Now() defer func() { metrics.FrameworkExtensionPointDuration.WithLabelValues(postFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) }() - statuses := make(framework.PluginToStatus) // `result` records the last meaningful(non-noop) PostFilterResult. var result *framework.PostFilterResult + var reasons []string + var failedPlugin string for _, pl := range f.postFilterPlugins { r, s := f.runPostFilterPlugin(ctx, pl, state, pod, filteredNodeStatusMap) if s.IsSuccess() { return r, s + } else if s.Code() == framework.UnschedulableAndUnresolvable { + return r, s.WithFailedPlugin(pl.Name()) } else if !s.IsUnschedulable() { - // Any status other than Success or Unschedulable is Error. - return nil, framework.AsStatus(s.AsError()) + // Any status other than Success, Unschedulable or UnschedulableAndUnresolvable is Error. + return nil, framework.AsStatus(s.AsError()).WithFailedPlugin(pl.Name()) } else if r != nil && r.Mode() != framework.ModeNoop { result = r } - statuses[pl.Name()] = s + + reasons = append(reasons, s.Reasons()...) + // Record the first failed plugin unless we proved that + // the latter is more relevant. + if len(failedPlugin) == 0 { + failedPlugin = pl.Name() + } } - return result, statuses.Merge() + return result, framework.NewStatus(framework.Unschedulable, reasons...).WithFailedPlugin(failedPlugin) } func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) { @@ -833,8 +849,7 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s break } - statusMap := f.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse) - status = statusMap.Merge() + status = f.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse) if !status.IsSuccess() && !status.IsUnschedulable() { return status } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go index 041bf4178a7a..0b60a82f9c2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go @@ -537,17 +537,22 @@ func (cache *cacheImpl) UpdatePod(oldPod, newPod *v1.Pod) error { defer cache.mu.Unlock() currState, ok := cache.podStates[key] + if !ok { + return fmt.Errorf("pod %v(%v) is not added to scheduler cache, so cannot be updated", key, klog.KObj(oldPod)) + } + // An assumed pod won't have Update/Remove event. It needs to have Add event // before Update event, in which case the state would change from Assumed to Added. 
- if ok && !cache.assumedPods.Has(key) { - if currState.pod.Spec.NodeName != newPod.Spec.NodeName { - klog.ErrorS(nil, "Pod updated on a different node than previously added to", "podKey", key, "pod", klog.KObj(oldPod)) - klog.ErrorS(nil, "scheduler cache is corrupted and can badly affect scheduling decisions") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - return cache.updatePod(oldPod, newPod) + if cache.assumedPods.Has(key) { + return fmt.Errorf("assumed pod %v(%v) should not be updated", key, klog.KObj(oldPod)) + } + + if currState.pod.Spec.NodeName != newPod.Spec.NodeName { + klog.ErrorS(nil, "Pod updated on a different node than previously added to", "podKey", key, "pod", klog.KObj(oldPod)) + klog.ErrorS(nil, "scheduler cache is corrupted and can badly affect scheduling decisions") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) } - return fmt.Errorf("pod %v(%v) is not added to scheduler cache, so cannot be updated", key, klog.KObj(oldPod)) + return cache.updatePod(oldPod, newPod) } func (cache *cacheImpl) RemovePod(pod *v1.Pod) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go index 4a45f5b4adba..cc0fbc73725d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go @@ -92,9 +92,9 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) { schedulingCycleCtx, cancel := context.WithCancel(ctx) defer cancel() - scheduleResult, assumedPodInfo, err := sched.schedulingCycle(schedulingCycleCtx, state, fwk, podInfo, start, podsToActivate) - if err != nil { - sched.FailureHandler(schedulingCycleCtx, fwk, assumedPodInfo, err, scheduleResult.reason, scheduleResult.nominatingInfo, start) + scheduleResult, assumedPodInfo, status := sched.schedulingCycle(schedulingCycleCtx, state, fwk, podInfo, start, podsToActivate) + if !status.IsSuccess() { + sched.FailureHandler(schedulingCycleCtx, fwk, assumedPodInfo, status, scheduleResult.nominatingInfo, start) return } @@ -125,42 +125,48 @@ func (sched *Scheduler) schedulingCycle( podInfo *framework.QueuedPodInfo, start time.Time, podsToActivate *framework.PodsToActivate, -) (ScheduleResult, *framework.QueuedPodInfo, error) { - +) (ScheduleResult, *framework.QueuedPodInfo, *framework.Status) { pod := podInfo.Pod scheduleResult, err := sched.SchedulePod(ctx, fwk, state, pod) if err != nil { + if err == ErrNoNodesAvailable { + status := framework.NewStatus(framework.UnschedulableAndUnresolvable).WithError(err) + return ScheduleResult{nominatingInfo: clearNominatedNode}, podInfo, status + } + + fitError, ok := err.(*framework.FitError) + if !ok { + klog.ErrorS(err, "Error selecting node for pod", "pod", klog.KObj(pod)) + return ScheduleResult{nominatingInfo: clearNominatedNode}, podInfo, framework.AsStatus(err) + } + // SchedulePod() may have failed because the pod would not fit on any host, so we try to // preempt, with the expectation that the next time the pod is tried for scheduling it // will fit due to the preemption. It is also possible that a different pod will schedule // into the resources that were preempted, but this is harmless. 
- var nominatingInfo *framework.NominatingInfo - reason := v1.PodReasonUnschedulable - if fitError, ok := err.(*framework.FitError); ok { - if !fwk.HasPostFilterPlugins() { - klog.V(3).InfoS("No PostFilter plugins are registered, so no preemption will be performed") - } else { - // Run PostFilter plugins to try to make the pod schedulable in a future scheduling cycle. - result, status := fwk.RunPostFilterPlugins(ctx, state, pod, fitError.Diagnosis.NodeToStatusMap) - if status.Code() == framework.Error { - klog.ErrorS(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", status) - } else { - fitError.Diagnosis.PostFilterMsg = status.Message() - klog.V(5).InfoS("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", status) - } - if result != nil { - nominatingInfo = result.NominatingInfo - } - } - } else if err == ErrNoNodesAvailable { - nominatingInfo = clearNominatedNode + + if !fwk.HasPostFilterPlugins() { + klog.V(3).InfoS("No PostFilter plugins are registered, so no preemption will be performed") + return ScheduleResult{}, podInfo, framework.NewStatus(framework.Unschedulable).WithError(err) + } + + // Run PostFilter plugins to attempt to make the pod schedulable in a future scheduling cycle. + result, status := fwk.RunPostFilterPlugins(ctx, state, pod, fitError.Diagnosis.NodeToStatusMap) + msg := status.Message() + fitError.Diagnosis.PostFilterMsg = msg + if status.Code() == framework.Error { + klog.ErrorS(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) } else { - klog.ErrorS(err, "Error selecting node for pod", "pod", klog.KObj(pod)) - nominatingInfo = clearNominatedNode - reason = v1.PodReasonSchedulerError + klog.V(5).InfoS("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) } - return ScheduleResult{nominatingInfo: nominatingInfo, reason: reason}, podInfo, err + + var nominatingInfo *framework.NominatingInfo + if result != nil { + nominatingInfo = result.NominatingInfo + } + return ScheduleResult{nominatingInfo: nominatingInfo}, podInfo, framework.NewStatus(framework.Unschedulable).WithError(err) } + metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start)) // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. @@ -174,9 +180,9 @@ func (sched *Scheduler) schedulingCycle( // This relies on the fact that Error will check if the pod has been bound // to a node and if so will not add it back to the unscheduled pods queue // (otherwise this would cause an infinite loop). - return ScheduleResult{nominatingInfo: clearNominatedNode, reason: v1.PodReasonSchedulerError}, + return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, - err + framework.AsStatus(err) } // Run the Reserve method of reserve plugins. @@ -187,9 +193,9 @@ func (sched *Scheduler) schedulingCycle( klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed") } - return ScheduleResult{nominatingInfo: clearNominatedNode, reason: v1.PodReasonSchedulerError}, + return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, - sts.AsError() + sts } // Run "permit" plugins. 
@@ -201,14 +207,9 @@ func (sched *Scheduler) schedulingCycle( klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed") } - reason := v1.PodReasonSchedulerError - if runPermitStatus.IsUnschedulable() { - reason = v1.PodReasonUnschedulable - } - - return ScheduleResult{nominatingInfo: clearNominatedNode, reason: reason}, + return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, - runPermitStatus.AsError() + runPermitStatus } // At the end of a successful scheduling cycle, pop and move up Pods if needed. @@ -297,12 +298,7 @@ func (sched *Scheduler) handleBindingCycleError( } } - reason := v1.PodReasonSchedulerError - if status.IsUnschedulable() { - reason = v1.PodReasonUnschedulable - } - - sched.FailureHandler(ctx, fwk, podInfo, status.AsError(), reason, clearNominatedNode, start) + sched.FailureHandler(ctx, fwk, podInfo, status, clearNominatedNode, start) } func (sched *Scheduler) frameworkForPod(pod *v1.Pod) (framework.Framework, error) { @@ -406,8 +402,9 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F return nil, diagnosis, s.AsError() } // Record the messages from PreFilter in Diagnosis.PreFilterMsg. - diagnosis.PreFilterMsg = s.Message() - klog.V(5).InfoS("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", s) + msg := s.Message() + diagnosis.PreFilterMsg = msg + klog.V(5).InfoS("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) // Status satisfying IsUnschedulable() gets injected into diagnosis.UnschedulablePlugins. if s.FailedPlugin() != "" { diagnosis.UnschedulablePlugins.Insert(s.FailedPlugin()) @@ -642,15 +639,15 @@ func prioritizeNodes( state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, -) (framework.NodeScoreList, error) { +) ([]framework.NodePluginScores, error) { // If no priority configs are provided, then all nodes will have a score of one. // This is required to generate the priority list in the required format if len(extenders) == 0 && !fwk.HasScorePlugins() { - result := make(framework.NodeScoreList, 0, len(nodes)) + result := make([]framework.NodePluginScores, 0, len(nodes)) for i := range nodes { - result = append(result, framework.NodeScore{ - Name: nodes[i].Name, - Score: 1, + result = append(result, framework.NodePluginScores{ + Name: nodes[i].Name, + TotalScore: 1, }) } return result, nil @@ -678,16 +675,12 @@ func prioritizeNodes( } } - // Summarize all scores. - result := make(framework.NodeScoreList, len(nodes)) - for i, pluginScores := range nodesScores { - result[i] = framework.NodeScore{Name: nodes[i].Name, Score: pluginScores.TotalScore} - } - if len(extenders) != 0 && nodes != nil { + // allNodeExtendersScores has all extenders scores for all nodes. + // It is keyed with node name. 
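
A worked example of the extender score scaling in the hunk that follows, assuming the usual constant values (framework.MaxNodeScore = 100, extenderv1.MaxExtenderPriority = 10; treat these as assumptions rather than something this patch guarantees): an extender score of 7 with weight 2 contributes 7 * 2 * (100 / 10) = 140 to the node's TotalScore.

package main

import "fmt"

const (
	maxNodeScore        = 100 // assumed value of framework.MaxNodeScore
	maxExtenderPriority = 10  // assumed value of extenderv1.MaxExtenderPriority
)

// scaledExtenderScore rescales an extender score into the scheduler's score range.
func scaledExtenderScore(score, weight int64) int64 {
	return score * weight * (maxNodeScore / maxExtenderPriority)
}

func main() {
	fmt.Println(scaledExtenderScore(7, 2)) // 140
}
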
+ allNodeExtendersScores := make(map[string]*framework.NodePluginScores, len(nodes)) var mu sync.Mutex var wg sync.WaitGroup - combinedScores := make(map[string]int64, len(nodes)) for i := range extenders { if !extenders[i].IsInterested(pod) { continue @@ -708,48 +701,65 @@ func prioritizeNodes( return } mu.Lock() + defer mu.Unlock() for i := range *prioritizedList { - host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score + nodename := (*prioritizedList)[i].Host + score := (*prioritizedList)[i].Score if klogV.Enabled() { - klogV.InfoS("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", host, "score", score) + klogV.InfoS("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", nodename, "score", score) } - combinedScores[host] += score * weight + + // MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore, + // therefore we need to scale the score returned by extenders to the score range used by the scheduler. + finalscore := score * weight * (framework.MaxNodeScore / extenderv1.MaxExtenderPriority) + + if allNodeExtendersScores[nodename] == nil { + allNodeExtendersScores[nodename] = &framework.NodePluginScores{ + Name: nodename, + Scores: make([]framework.PluginScore, 0, len(extenders)), + } + } + allNodeExtendersScores[nodename].Scores = append(allNodeExtendersScores[nodename].Scores, framework.PluginScore{ + Name: extenders[extIndex].Name(), + Score: finalscore, + }) + allNodeExtendersScores[nodename].TotalScore += finalscore } - mu.Unlock() }(i) } // wait for all go routines to finish wg.Wait() - for i := range result { - // MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore, - // therefore we need to scale the score returned by extenders to the score range used by the scheduler. - result[i].Score += combinedScores[result[i].Name] * (framework.MaxNodeScore / extenderv1.MaxExtenderPriority) + for i := range nodesScores { + if score, ok := allNodeExtendersScores[nodes[i].Name]; ok { + nodesScores[i].Scores = append(nodesScores[i].Scores, score.Scores...) + nodesScores[i].TotalScore += score.TotalScore + } } } if klogV.Enabled() { - for i := range result { - klogV.InfoS("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", result[i].Name, "score", result[i].Score) + for i := range nodesScores { + klogV.InfoS("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", nodesScores[i].Name, "score", nodesScores[i].TotalScore) } } - return result, nil + return nodesScores, nil } // selectHost takes a prioritized list of nodes and then picks one // in a reservoir sampling manner from the nodes that had the highest score. 
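
A small self-contained illustration of the reservoir-sampling tie-break used by selectHost below: replacing the current candidate with probability 1/i at the i-th node holding the max score leaves every tied node equally likely to be picked (pickAmongTies is a hypothetical helper, not scheduler code).

package main

import (
	"fmt"
	"math/rand"
)

// pickAmongTies selects one name uniformly at random without knowing the count up front.
func pickAmongTies(names []string) string {
	selected := names[0]
	cnt := 1
	for _, n := range names[1:] {
		cnt++
		if rand.Intn(cnt) == 0 {
			selected = n // replace with probability 1/cnt
		}
	}
	return selected
}

func main() {
	counts := map[string]int{}
	for i := 0; i < 30000; i++ {
		counts[pickAmongTies([]string{"node-a", "node-b", "node-c"})]++
	}
	fmt.Println(counts) // each node is chosen roughly 10000 times
}
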
-func selectHost(nodeScoreList framework.NodeScoreList) (string, error) { - if len(nodeScoreList) == 0 { +func selectHost(nodeScores []framework.NodePluginScores) (string, error) { + if len(nodeScores) == 0 { return "", fmt.Errorf("empty priorityList") } - maxScore := nodeScoreList[0].Score - selected := nodeScoreList[0].Name + maxScore := nodeScores[0].TotalScore + selected := nodeScores[0].Name cntOfMaxScore := 1 - for _, ns := range nodeScoreList[1:] { - if ns.Score > maxScore { - maxScore = ns.Score + for _, ns := range nodeScores[1:] { + if ns.TotalScore > maxScore { + maxScore = ns.TotalScore selected = ns.Name cntOfMaxScore = 1 - } else if ns.Score == maxScore { + } else if ns.TotalScore == maxScore { cntOfMaxScore++ if rand.Intn(cntOfMaxScore) == 0 { // Replace the candidate with probability of 1/cntOfMaxScore @@ -833,7 +843,12 @@ func getAttemptsLabel(p *framework.QueuedPodInfo) string { // handleSchedulingFailure records an event for the pod that indicates the // pod has failed to schedule. Also, update the pod condition and nominated node name if set. -func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, err error, reason string, nominatingInfo *framework.NominatingInfo, start time.Time) { +func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { + reason := v1.PodReasonSchedulerError + if status.IsUnschedulable() { + reason = v1.PodReasonUnschedulable + } + switch reason { case v1.PodReasonUnschedulable: metrics.PodUnschedulable(fwk.ProfileName(), metrics.SinceInSeconds(start)) @@ -842,12 +857,11 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo } pod := podInfo.Pod - var errMsg string - if err != nil { - errMsg = err.Error() - } + err := status.AsError() + errMsg := status.Message() + if err == ErrNoNodesAvailable { - klog.V(2).InfoS("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod)) + klog.V(2).InfoS("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod), "err", err) } else if fitError, ok := err.(*framework.FitError); ok { // Inject UnschedulablePlugins to PodInfo, which will be used later for moving Pods between queues efficiently. podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go index 581c792a7cfc..03bd947a3257 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go @@ -132,9 +132,6 @@ type ScheduleResult struct { EvaluatedNodes int // The number of nodes out of the evaluated ones that fit the pod. FeasibleNodes int - - // The reason records the failure in scheduling cycle. - reason string // The nominating info for scheduling cycle. 
nominatingInfo *framework.NominatingInfo } @@ -430,7 +427,7 @@ func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.K return fExtenders, nil } -type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, err error, reason string, nominatingInfo *framework.NominatingInfo, start time.Time) +type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) func unionedGVKs(m map[framework.ClusterEvent]sets.String) map[framework.GVK]framework.ActionType { gvkMap := make(map[framework.GVK]framework.ActionType) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go index 010710e904bb..55203a470dba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go @@ -42,21 +42,3 @@ func GetProfileName(pod *v1.Pod, containerName string) string { func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string { return annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] } - -// SetProfileName sets the name of the profile to use with the container. -func SetProfileName(pod *v1.Pod, containerName, profileName string) error { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - pod.Annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] = profileName - return nil -} - -// SetProfileNameFromPodAnnotations sets the name of the profile to use with the container. -func SetProfileNameFromPodAnnotations(annotations map[string]string, containerName, profileName string) error { - if annotations == nil { - return nil - } - annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+containerName] = profileName - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go index 82a2fc5e0a90..b9610d992f63 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go @@ -20,30 +20,6 @@ import ( v1 "k8s.io/api/core/v1" ) -// HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account -// the possibility of nils -func HasPrivilegedRequest(container *v1.Container) bool { - if container.SecurityContext == nil { - return false - } - if container.SecurityContext.Privileged == nil { - return false - } - return *container.SecurityContext.Privileged -} - -// HasCapabilitiesRequest returns true if Adds or Drops are defined in the security context -// capabilities, taking into account nils -func HasCapabilitiesRequest(container *v1.Container) bool { - if container.SecurityContext == nil { - return false - } - if container.SecurityContext.Capabilities == nil { - return false - } - return len(container.SecurityContext.Capabilities.Add) > 0 || len(container.SecurityContext.Capabilities.Drop) > 0 -} - // HasWindowsHostProcessRequest returns true if container should run as HostProcess container, // taking into account nils func HasWindowsHostProcessRequest(pod *v1.Pod, container *v1.Container) bool { diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go index 5a719dfaee61..e00b66df1b78 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go @@ -135,7 +135,7 @@ var _ timer = &realTimer{} // runs are "accumulated" over time, one per minInterval up to burstRuns total. // This can be used, for example, to mitigate the impact of expensive operations // being called in response to user-initiated operations. Run requests that -// would violate the minInterval are coallesced and run at the next opportunity. +// would violate the minInterval are coalesced and run at the next opportunity. // // The function will be run at least once per maxInterval. For example, this can // force periodic refreshes of state in the absence of anyone calling Run. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go index 4dc27d347e40..3a140c83df22 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go @@ -72,12 +72,6 @@ func Exec(execer exec.Interface, parameters ...string) error { return nil } -// Exists returns true if conntrack binary is installed. -func Exists(execer exec.Interface) bool { - _, err := execer.LookPath("conntrack") - return err == nil -} - // ClearEntriesForPort uses the conntrack tool to delete the conntrack entries // for connections specified by the port. // When a packet arrives, it will not go through NAT table again, because it is not "the first" packet. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index 5df8c354da80..0d8135e32979 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -218,7 +218,7 @@ type runner struct { func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath14x, lockfilePath16x string) Interface { version, err := getIPTablesVersion(exec, protocol) if err != nil { - klog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) + klog.InfoS("Error checking iptables version, assuming version at least", "version", MinCheckVersion, "err", err) version = MinCheckVersion } @@ -355,7 +355,7 @@ func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error { // run and return iptablesSaveCmd := iptablesSaveCommand(runner.protocol) args := []string{"-t", string(table)} - klog.V(4).Infof("running %s %v", iptablesSaveCmd, args) + klog.V(4).InfoS("Running", "command", iptablesSaveCmd, "arguments", args) cmd := runner.exec.Command(iptablesSaveCmd, args...) 
cmd.SetStdout(buffer) stderrBuffer := bytes.NewBuffer(nil) @@ -412,7 +412,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla trace.Step("Locks grabbed") defer func(locker iptablesLocker) { if err := locker.Close(); err != nil { - klog.Errorf("Failed to close iptables locks: %v", err) + klog.ErrorS(err, "Failed to close iptables locks") } }(locker) } @@ -420,7 +420,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla // run the command and return the output or an error including the output and error fullArgs := append(runner.restoreWaitFlag, args...) iptablesRestoreCmd := iptablesRestoreCommand(runner.protocol) - klog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs) + klog.V(4).InfoS("Running", "command", iptablesRestoreCmd, "arguments", fullArgs) cmd := runner.exec.Command(iptablesRestoreCmd, fullArgs...) cmd.SetStdin(bytes.NewBuffer(data)) b, err := cmd.CombinedOutput() @@ -464,7 +464,7 @@ func (runner *runner) runContext(ctx context.Context, op operation, args []strin iptablesCmd := iptablesCommand(runner.protocol) fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) - klog.V(5).Infof("running iptables: %s %v", iptablesCmd, fullArgs) + klog.V(5).InfoS("Running", "command", iptablesCmd, "arguments", fullArgs) if ctx == nil { return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput() } @@ -492,7 +492,7 @@ func trimhex(s string) string { // of hack and half-measures. We should nix this ASAP. func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) { iptablesSaveCmd := iptablesSaveCommand(runner.protocol) - klog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table)) + klog.V(1).InfoS("Running", "command", iptablesSaveCmd, "table", string(table)) out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput() if err != nil { return false, fmt.Errorf("error checking rule: %v", err) @@ -531,7 +531,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st if sets.NewString(fields...).IsSuperset(argset) { return true, nil } - klog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) + klog.V(5).InfoS("DBG: fields is not a superset of args", "fields", fields, "arguments", args) } return false, nil @@ -572,7 +572,7 @@ func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), i _ = utilwait.PollImmediateUntil(interval, func() (bool, error) { for _, table := range tables { if _, err := runner.EnsureChain(table, canary); err != nil { - klog.Warningf("Could not set up iptables canary %s/%s: %v", string(table), string(canary), err) + klog.ErrorS(err, "Could not set up iptables canary", "table", table, "chain", canary) return false, nil } } @@ -584,11 +584,10 @@ func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), i if exists, err := runner.ChainExists(tables[0], canary); exists { return false, nil } else if isResourceError(err) { - klog.Warningf("Could not check for iptables canary %s/%s: %v", string(tables[0]), string(canary), err) + klog.ErrorS(err, "Could not check for iptables canary", "table", tables[0], "chain", canary) return false, nil } - klog.V(2).Infof("iptables canary %s/%s deleted", string(tables[0]), string(canary)) - + klog.V(2).InfoS("IPTables canary deleted", "table", tables[0], "chain", canary) // Wait for the other canaries to be deleted too before returning // so we don't start reloading 
too soon. err := utilwait.PollImmediate(iptablesFlushPollTime, iptablesFlushTimeout, func() (bool, error) { @@ -600,7 +599,7 @@ func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), i return true, nil }) if err != nil { - klog.Warning("Inconsistent iptables state detected.") + klog.InfoS("Inconsistent iptables state detected") } return true, nil }, stopCh) @@ -613,7 +612,7 @@ func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), i return } - klog.V(2).Infof("Reloading after iptables flush") + klog.V(2).InfoS("Reloading after iptables flush") reloadFunc() } } @@ -694,11 +693,11 @@ func getIPTablesRestoreWaitFlag(version *utilversion.Version, exec utilexec.Inte // --version, assume it also supports --wait vstring, err := getIPTablesRestoreVersionString(exec, protocol) if err != nil || vstring == "" { - klog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") + klog.V(3).InfoS("Couldn't get iptables-restore version; assuming it doesn't support --wait") return nil } if _, err := utilversion.ParseGeneric(vstring); err != nil { - klog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait") + klog.V(3).InfoS("Couldn't parse iptables-restore version; assuming it doesn't support --wait") return nil } return []string{WaitString} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go index 419c5f46af0c..2bb08e430a5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go @@ -21,8 +21,6 @@ import ( "strconv" "strings" "time" - - "k8s.io/apimachinery/pkg/util/version" ) // Interface is an injectable interface for running ipvs commands. Implementations must be goroutine-safe. @@ -71,22 +69,6 @@ const ( FlagHashed = 0x2 ) -// IPVS required kernel modules. -const ( - // KernelModuleIPVS is the kernel module "ip_vs" - KernelModuleIPVS string = "ip_vs" - // KernelModuleIPVSRR is the kernel module "ip_vs_rr" - KernelModuleIPVSRR string = "ip_vs_rr" - // KernelModuleIPVSWRR is the kernel module "ip_vs_wrr" - KernelModuleIPVSWRR string = "ip_vs_wrr" - // KernelModuleIPVSSH is the kernel module "ip_vs_sh" - KernelModuleIPVSSH string = "ip_vs_sh" - // KernelModuleNfConntrackIPV4 is the module "nf_conntrack_ipv4" - KernelModuleNfConntrackIPV4 string = "nf_conntrack_ipv4" - // KernelModuleNfConntrack is the kernel module "nf_conntrack" - KernelModuleNfConntrack string = "nf_conntrack" -) - // Equal check the equality of virtual server. // We don't use struct == since it doesn't work because of slice. func (svc *VirtualServer) Equal(other *VirtualServer) bool { @@ -122,17 +104,6 @@ func (rs *RealServer) Equal(other *RealServer) bool { rs.Port == other.Port } -// GetRequiredIPVSModules returns the required ipvs modules for the given linux kernel version. 
-func GetRequiredIPVSModules(kernelVersion *version.Version) []string { - // "nf_conntrack_ipv4" has been removed since v4.19 - // see https://github.com/torvalds/linux/commit/a0ae2562c6c4b2721d9fddba63b7286c13517d9f - if kernelVersion.LessThan(version.MustParseGeneric("4.19")) { - return []string{KernelModuleIPVS, KernelModuleIPVSRR, KernelModuleIPVSWRR, KernelModuleIPVSSH, KernelModuleNfConntrackIPV4} - } - return []string{KernelModuleIPVS, KernelModuleIPVSRR, KernelModuleIPVSWRR, KernelModuleIPVSSH, KernelModuleNfConntrack} - -} - // IsRsGracefulTerminationNeeded returns true if protocol requires graceful termination for the stale connections func IsRsGracefulTerminationNeeded(proto string) bool { return !strings.EqualFold(proto, "UDP") && !strings.EqualFold(proto, "SCTP") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go index c4480e8447a8..e6243dba1203 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go @@ -368,7 +368,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { return err } - err = writer.Write(payload) + err = writer.Write(payload, nil /*setPerms*/) if err != nil { klog.Errorf("failed to write payload to dir: %v", err) return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index cdfba9480cd7..7a1e5e58178b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -249,17 +249,17 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA return err } - err = writer.Write(payload) + setPerms := func(_ string) error { + // This may be the first time writing and new files get created outside the timestamp subdirectory: + // change the permissions on the whole volume and not only in the timestamp directory. 
+ return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + } + err = writer.Write(payload, setPerms) if err != nil { klog.Errorf("Error writing payload to dir: %v", err) return err } - err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) - if err != nil { - klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup) - return err - } setupSuccess = true return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 50b0bdc2235f..9a77fb715662 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -505,6 +505,15 @@ func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo * return true } +// Returns true if and only if new maxAttachLimit doesn't require CSINode update +func keepAllocatableCount(driverInfoSpec storagev1.CSINodeDriver, maxAttachLimit int64) bool { + if maxAttachLimit == 0 { + return driverInfoSpec.Allocatable == nil || driverInfoSpec.Allocatable.Count == nil + } + + return driverInfoSpec.Allocatable != nil && driverInfoSpec.Allocatable.Count != nil && int64(*driverInfoSpec.Allocatable.Count) == maxAttachLimit +} + func (nim *nodeInfoManager) installDriverToCSINode( nodeInfo *storagev1.CSINode, driverName string, @@ -528,7 +537,8 @@ func (nim *nodeInfoManager) installDriverToCSINode( for _, driverInfoSpec := range nodeInfo.Spec.Drivers { if driverInfoSpec.Name == driverName { if driverInfoSpec.NodeID == driverNodeID && - sets.NewString(driverInfoSpec.TopologyKeys...).Equal(topologyKeys) { + sets.NewString(driverInfoSpec.TopologyKeys...).Equal(topologyKeys) && + keepAllocatableCount(driverInfoSpec, maxAttachLimit) { specModified = false } } else { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index 8338850ac42e..b13e6ea6015c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -220,15 +220,14 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, mounterArgs volume.Mounte return err } - err = writer.Write(data) - if err != nil { - klog.Errorf("Error writing payload to dir: %v", err) - return err + setPerms := func(_ string) error { + // This may be the first time writing and new files get created outside the timestamp subdirectory: + // change the permissions on the whole volume and not only in the timestamp directory. 
+ return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } - - err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + err = writer.Write(data, setPerms) if err != nil { - klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup) + klog.Errorf("Error writing payload to dir: %v", err) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go index b6e36bd3cce4..88c0da17c63d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/hostpath/host_path.go @@ -21,17 +21,19 @@ import ( "os" "regexp" - "k8s.io/mount-utils" + "github.com/opencontainers/selinux/go-selinux" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" "k8s.io/kubernetes/pkg/volume/validation" + "k8s.io/mount-utils" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -322,7 +324,17 @@ func (r *hostPathProvisioner) Provision(selectedNode *v1.Node, allowedTopologies pv.Spec.AccessModes = r.plugin.GetAccessModes() } - return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750) + if err := os.MkdirAll(pv.Spec.HostPath.Path, 0750); err != nil { + return nil, err + } + if selinux.GetEnabled() { + err := selinux.SetFileLabel(pv.Spec.HostPath.Path, config.KubeletContainersSharedSELinuxLabel) + if err != nil { + return nil, fmt.Errorf("failed to set selinux label for %q: %v", pv.Spec.HostPath.Path, err) + } + } + + return pv, nil } // hostPathDeleter deletes a hostPath PV from the cluster. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index c55a3502e79f..ca0bc3040021 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -418,7 +418,7 @@ func (plugin *localVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptio case hostutil.FileTypeDirectory: // if the given local volume path is of already filesystem directory, return directly because // we do not want to prevent mount operation from succeeding. 
- klog.InfoS("Expansion of directory based local volumes is NO-OP", "local-volume-path", localDevicePath) + klog.InfoS("Expansion of directory based local volumes is NO-OP", "localVolumePath", localDevicePath) return true, nil default: return false, fmt.Errorf("only directory and block device are supported") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go index a48567b2a6a0..c82b38653e37 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go @@ -230,17 +230,17 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA return err } - err = writer.Write(data) + setPerms := func(_ string) error { + // This may be the first time writing and new files get created outside the timestamp subdirectory: + // change the permissions on the whole volume and not only in the timestamp directory. + return volume.SetVolumeOwnership(s, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) + } + err = writer.Write(data, setPerms) if err != nil { klog.Errorf("Error writing payload to dir: %v", err) return err } - err = volume.SetVolumeOwnership(s, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) - if err != nil { - klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup) - return err - } setupSuccess = true return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index fa06d879303e..f43f1bffa3b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -244,17 +244,17 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs return err } - err = writer.Write(payload) + setPerms := func(_ string) error { + // This may be the first time writing and new files get created outside the timestamp subdirectory: + // change the permissions on the whole volume and not only in the timestamp directory. + return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + } + err = writer.Write(payload, setPerms) if err != nil { klog.Errorf("Error writing payload to dir: %v", err) return err } - err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) - if err != nil { - klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup) - return err - } setupSuccess = true return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index 94428f6ffc8b..91ee77a9f690 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -86,11 +86,16 @@ const ( // Write does an atomic projection of the given payload into the writer's target // directory. Input paths must not begin with '..'. 
+// setPerms is an optional pointer to a function that caller can provide to set the +// permissions of the newly created files before they are published. The function is +// passed subPath which is the name of the timestamped directory that was created +// under target directory. // // The Write algorithm is: // // 1. The payload is validated; if the payload is invalid, the function returns -// 2.  The current timestamped directory is detected by reading the data directory +// +// 2. The current timestamped directory is detected by reading the data directory // symlink // // 3. The old version of the volume is walked to determine whether any @@ -98,13 +103,19 @@ const ( // // 4. The data in the current timestamped directory is compared to the projected // data to determine if an update is required. -// 5.  A new timestamped dir is created // -// 6. The payload is written to the new timestamped directory -// 7.  A symlink to the new timestamped directory ..data_tmp is created that will -// become the new data directory -// 8.  The new data directory symlink is renamed to the data directory; rename is atomic -// 9.  Symlinks and directory for new user-visible files are created (if needed). +// 5. A new timestamped dir is created. +// +// 6. The payload is written to the new timestamped directory. +// +// 7. Permissions are set (if setPerms is not nil) on the new timestamped directory and files. +// +// 8. A symlink to the new timestamped directory ..data_tmp is created that will +// become the new data directory. +// +// 9. The new data directory symlink is renamed to the data directory; rename is atomic. +// +// 10. Symlinks and directory for new user-visible files are created (if needed). // // For example, consider the files: // /podName @@ -123,9 +134,10 @@ const ( // linking everything else. On Windows, if a target does not exist, the created symlink // will not work properly if the target ends up being a directory. // -// 10. Old paths are removed from the user-visible portion of the target directory -// 11.  The previous timestamped directory is removed, if it exists -func (w *AtomicWriter) Write(payload map[string]FileProjection) error { +// 11. Old paths are removed from the user-visible portion of the target directory. +// +// 12. The previous timestamped directory is removed, if it exists. 
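
A minimal sketch of the symlink-swap idea behind steps 5-9 of the algorithm above, with an assumed simplified payload type; the real AtomicWriter additionally validates paths, maintains the user-visible links and removes the previous timestamped directory.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// atomicPublish writes payload into a fresh timestamped directory, runs the optional
// permission hook, then atomically points the ..data symlink at the new directory.
func atomicPublish(targetDir string, payload map[string][]byte, setPerms func(tsDirName string) error) error {
	tsDirName := time.Now().UTC().Format(".2006_01_02_15_04_05.000000000")
	tsDir := filepath.Join(targetDir, tsDirName)
	if err := os.MkdirAll(tsDir, 0755); err != nil {
		return err
	}
	for name, data := range payload {
		if err := os.WriteFile(filepath.Join(tsDir, name), data, 0644); err != nil {
			return err
		}
	}
	// The permission hook runs before the new directory becomes visible to readers.
	if setPerms != nil {
		if err := setPerms(tsDirName); err != nil {
			return err
		}
	}
	tmpLink := filepath.Join(targetDir, "..data_tmp")
	if err := os.Symlink(tsDirName, tmpLink); err != nil {
		return err
	}
	// rename is atomic, so readers always see either the old or the new data directory.
	return os.Rename(tmpLink, filepath.Join(targetDir, "..data"))
}

func main() {
	dir, _ := os.MkdirTemp("", "atomic-sketch")
	defer os.RemoveAll(dir)
	fmt.Println(atomicPublish(dir, map[string][]byte{"config": []byte("v1")}, nil))
}
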
+func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(subPath string) error) error { // (1) cleanPayload, err := validatePayload(payload) if err != nil { @@ -185,6 +197,14 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) // (7) + if setPerms != nil { + if err := setPerms(tsDirName); err != nil { + klog.Errorf("%s: error applying ownership settings: %v", w.logContext, err) + return err + } + } + + // (8) newDataDirPath := filepath.Join(w.targetDir, newDataDirName) if err = os.Symlink(tsDirName, newDataDirPath); err != nil { os.RemoveAll(tsDir) @@ -192,7 +212,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { return err } - // (8) + // (9) if runtime.GOOS == "windows" { os.Remove(dataDirPath) err = os.Symlink(tsDirName, dataDirPath) @@ -207,19 +227,19 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { return err } - // (9) + // (10) if err = w.createUserVisibleFiles(cleanPayload); err != nil { klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) return err } - // (10) + // (11) if err = w.removeUserVisiblePaths(pathsToRemove); err != nil { klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) return err } - // (11) + // (12) if len(oldTsDir) > 0 { if err = os.RemoveAll(oldTsPath); err != nil { klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index d070f2f29628..d6d028b0d928 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -229,23 +229,6 @@ func MarkFSResizeFinished( return updatedPVC, err } -func MarkControllerExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) { - expansionFailedOnController := v1.PersistentVolumeClaimControllerExpansionFailed - newPVC := pvc.DeepCopy() - newPVC.Status.ResizeStatus = &expansionFailedOnController - patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */) - if err != nil { - return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err) - } - - updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace). - Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") - if updateErr != nil { - return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr) - } - return updatedClaim, nil -} - // MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion // of volumes which are in failed state. 
func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index f6f5a3f99606..bc33f5f2d424 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -576,6 +576,44 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool { volume.ConfigMap != nil } +// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for +// local PersistentVolumes. nil is returned if the PV does not have any +// specific node affinity node selector terms and match expressions. +// PersistentVolume with node affinity has select and match expressions +// in the form of: +// +// nodeAffinity: +// required: +// nodeSelectorTerms: +// - matchExpressions: +// - key: kubernetes.io/hostname +// operator: In +// values: +// - +// - +func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string { + if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil { + return nil + } + + var result sets.Set[string] + for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { + var nodes sets.Set[string] + for _, matchExpr := range term.MatchExpressions { + if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn { + if nodes == nil { + nodes = sets.New(matchExpr.Values...) + } else { + nodes = nodes.Intersection(sets.New(matchExpr.Values...)) + } + } + } + result = result.Union(nodes) + } + + return sets.List(result) +} + // GetPodVolumeNames returns names of volumes that are used in a pod, // either as filesystem mount or raw block device, together with list // of all SELinux contexts of all containers that use the volumes. 
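
A standalone sketch of the semantics of GetLocalPersistentVolumeNodeNames above, using hypothetical simplified types instead of the core/v1 API: hostname values are intersected within a single node selector term and unioned across terms.

package main

import (
	"fmt"
	"sort"
)

type term struct {
	hostnameValues [][]string // one slice per kubernetes.io/hostname In-expression
}

func nodeNames(terms []term) []string {
	result := map[string]bool{}
	for _, t := range terms {
		var nodes map[string]bool
		for _, values := range t.hostnameValues {
			set := map[string]bool{}
			for _, v := range values {
				set[v] = true
			}
			if nodes == nil {
				nodes = set
				continue
			}
			for n := range nodes { // intersection within a term
				if !set[n] {
					delete(nodes, n)
				}
			}
		}
		for n := range nodes { // union across terms
			result[n] = true
		}
	}
	out := make([]string, 0, len(result))
	for n := range result {
		out = append(out, n)
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(nodeNames([]term{
		{hostnameValues: [][]string{{"node-1", "node-2"}, {"node-2", "node-3"}}}, // intersects to node-2
		{hostnameValues: [][]string{{"node-4"}}},
	})) // [node-2 node-4]
}
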
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go index 344e7fc54c7e..2c3d3015ab6e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -65,17 +65,17 @@ func removePtr(replicas *int32) int32 { return *replicas } -func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) { +func WaitUntilPodIsScheduled(ctx context.Context, c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) { // Wait until it's scheduled - p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } pollingPeriod := 200 * time.Millisecond startTime := time.Now() - for startTime.Add(timeout).After(time.Now()) { + for startTime.Add(timeout).After(time.Now()) && ctx.Err() == nil { time.Sleep(pollingPeriod) - p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } @@ -83,13 +83,13 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time return nil, fmt.Errorf("timed out after %v when waiting for pod %v/%v to start", timeout, namespace, name) } -func RunPodAndGetNodeName(c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) { +func RunPodAndGetNodeName(ctx context.Context, c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) { name := pod.Name namespace := pod.Namespace if err := CreatePodWithRetries(c, namespace, pod); err != nil { return "", err } - p, err := WaitUntilPodIsScheduled(c, name, namespace, timeout) + p, err := WaitUntilPodIsScheduled(ctx, c, name, namespace, timeout) if err != nil { return "", err } @@ -173,8 +173,8 @@ type RCConfig struct { LogFunc func(fmt string, args ...interface{}) // If set those functions will be used to gather data from Nodes - in integration tests where no // kubelets are running those variables should be nil. - NodeDumpFunc func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) - ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) + NodeDumpFunc func(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) + ContainerDumpFunc func(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) // Names of the secrets and configmaps to mount. SecretNames []string @@ -288,16 +288,16 @@ func Diff(oldPods []*v1.Pod, curPods []*v1.Pod) PodDiff { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). 
-func RunDeployment(config DeploymentConfig) error { +func RunDeployment(ctx context.Context, config DeploymentConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *DeploymentConfig) Run() error { - return RunDeployment(*config) +func (config *DeploymentConfig) Run(ctx context.Context) error { + return RunDeployment(ctx, *config) } func (config *DeploymentConfig) GetKind() schema.GroupKind { @@ -374,16 +374,16 @@ func (config *DeploymentConfig) create() error { // and waits until all the pods it launches to reach the "Running" state. // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunReplicaSet(config ReplicaSetConfig) error { +func RunReplicaSet(ctx context.Context, config ReplicaSetConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *ReplicaSetConfig) Run() error { - return RunReplicaSet(*config) +func (config *ReplicaSetConfig) Run(ctx context.Context) error { + return RunReplicaSet(ctx, *config) } func (config *ReplicaSetConfig) GetKind() schema.GroupKind { @@ -456,16 +456,16 @@ func (config *ReplicaSetConfig) create() error { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunJob(config JobConfig) error { +func RunJob(ctx context.Context, config JobConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *JobConfig) Run() error { - return RunJob(*config) +func (config *JobConfig) Run(ctx context.Context) error { + return RunJob(ctx, *config) } func (config *JobConfig) GetKind() schema.GroupKind { @@ -530,16 +530,16 @@ func (config *JobConfig) create() error { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunRC(config RCConfig) error { +func RunRC(ctx context.Context, config RCConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *RCConfig) Run() error { - return RunRC(*config) +func (config *RCConfig) Run(ctx context.Context) error { + return RunRC(ctx, *config) } func (config *RCConfig) GetName() string { @@ -776,7 +776,7 @@ func ComputeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus { return startupStatus } -func (config *RCConfig) start() error { +func (config *RCConfig) start(ctx context.Context) error { // Don't force tests to fail if they don't care about containers restarting. 
var maxContainerFailures int if config.MaxContainerFailures == nil { @@ -824,11 +824,11 @@ func (config *RCConfig) start() error { if startupStatus.FailedContainers > maxContainerFailures { if config.NodeDumpFunc != nil { - config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog) + config.NodeDumpFunc(ctx, config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog) } if config.ContainerDumpFunc != nil { // Get the logs from the failed containers to help diagnose what caused them to fail - config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog) + config.ContainerDumpFunc(ctx, config.Client, config.Namespace, config.RCConfigLog) } return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures) } @@ -858,7 +858,7 @@ func (config *RCConfig) start() error { if oldRunning != config.Replicas { // List only pods from a given replication controller. options := metav1.ListOptions{LabelSelector: label.String()} - if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil { + if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(ctx, options); err == nil { for _, pod := range pods.Items { config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp) } @@ -946,8 +946,8 @@ type CountToStrategy struct { } type TestNodePreparer interface { - PrepareNodes(nextNodeIndex int) error - CleanupNodes() error + PrepareNodes(ctx context.Context, nextNodeIndex int) error + CleanupNodes(ctx context.Context) error } type PrepareNodeStrategy interface { @@ -955,12 +955,12 @@ type PrepareNodeStrategy interface { PreparePatch(node *v1.Node) []byte // Create or modify any objects that depend on the node before the test starts. // Caller will re-try when http.StatusConflict error is returned. - PrepareDependentObjects(node *v1.Node, client clientset.Interface) error + PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error // Clean up any node modifications after the test finishes. - CleanupNode(node *v1.Node) *v1.Node + CleanupNode(ctx context.Context, node *v1.Node) *v1.Node // Clean up any objects that depend on the node after the test finishes. // Caller will re-try when http.StatusConflict error is returned. 
- CleanupDependentObjects(nodeName string, client clientset.Interface) error + CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error } type TrivialNodePrepareStrategy struct{} @@ -971,16 +971,16 @@ func (*TrivialNodePrepareStrategy) PreparePatch(*v1.Node) []byte { return []byte{} } -func (*TrivialNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (*TrivialNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := *node return &nodeCopy } -func (*TrivialNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { +func (*TrivialNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { return nil } -func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { +func (*TrivialNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { return nil } @@ -1009,7 +1009,7 @@ func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte { return []byte(patch) } -func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (s *LabelNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := node.DeepCopy() if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 { delete(nodeCopy.Labels, s.LabelKey) @@ -1017,11 +1017,11 @@ func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { return nodeCopy } -func (*LabelNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { +func (*LabelNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { return nil } -func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { +func (*LabelNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { return nil } @@ -1069,7 +1069,7 @@ func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte { return patch } -func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (s *NodeAllocatableStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := node.DeepCopy() for name := range s.NodeAllocatable { delete(nodeCopy.Status.Allocatable, name) @@ -1077,7 +1077,7 @@ func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node { return nodeCopy } -func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error { +func (s *NodeAllocatableStrategy) createCSINode(ctx context.Context, nodeName string, client clientset.Interface) error { csiNode := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, @@ -1099,7 +1099,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d) } - _, err := client.StorageV1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{}) + _, err := client.StorageV1().CSINodes().Create(ctx, csiNode, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // Something created CSINode instance after we checked it did not exist. 
// Make the caller to re-try PrepareDependentObjects by returning Conflict error @@ -1108,7 +1108,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse return err } -func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, client clientset.Interface) error { +func (s *NodeAllocatableStrategy) updateCSINode(ctx context.Context, csiNode *storagev1.CSINode, client clientset.Interface) error { for driverName, allocatable := range s.CsiNodeAllocatable { found := false for i, driver := range csiNode.Spec.Drivers { @@ -1129,23 +1129,23 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, clie } csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",") - _, err := client.StorageV1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{}) + _, err := client.StorageV1().CSINodes().Update(ctx, csiNode, metav1.UpdateOptions{}) return err } -func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { - csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) +func (s *NodeAllocatableStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { + csiNode, err := client.StorageV1().CSINodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { - return s.createCSINode(node.Name, client) + return s.createCSINode(ctx, node.Name, client) } return err } - return s.updateCSINode(csiNode, client) + return s.updateCSINode(ctx, csiNode, client) } -func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { - csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func (s *NodeAllocatableStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { + csiNode, err := client.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil @@ -1160,7 +1160,7 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien } } } - return s.updateCSINode(csiNode, client) + return s.updateCSINode(ctx, csiNode, client) } // UniqueNodeLabelStrategy sets a unique label for each node. 
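Since PrepareNodeStrategy and TestNodePreparer now take a context in every method, callers can bound node preparation with their own deadline or cancellation. A minimal caller-side sketch follows; the function name prepareExampleNode and the two-minute timeout are illustrative only, and it assumes it lives in the same testutils package that defines PrepareNodeStrategy:

package testutils

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// prepareExampleNode shows the context flowing into the strategy methods
// instead of the context.TODO() calls that used to sit inside them.
func prepareExampleNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Dependent objects (for example the CSINode created by
	// NodeAllocatableStrategy) are now created under the caller's context.
	return strategy.PrepareDependentObjects(ctx, node, client)
}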
@@ -1182,7 +1182,7 @@ func (s *UniqueNodeLabelStrategy) PreparePatch(*v1.Node) []byte { return []byte(patch) } -func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (s *UniqueNodeLabelStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := node.DeepCopy() if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 { delete(nodeCopy.Labels, s.LabelKey) @@ -1190,22 +1190,22 @@ func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node { return nodeCopy } -func (*UniqueNodeLabelStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { +func (*UniqueNodeLabelStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { return nil } -func (*UniqueNodeLabelStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { +func (*UniqueNodeLabelStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { return nil } -func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error { +func DoPrepareNode(ctx context.Context, client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error { var err error patch := strategy.PreparePatch(node) if len(patch) == 0 { return nil } for attempt := 0; attempt < retries; attempt++ { - if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil { + if _, err = client.CoreV1().Nodes().Patch(ctx, node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil { break } if !apierrors.IsConflict(err) { @@ -1218,7 +1218,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo } for attempt := 0; attempt < retries; attempt++ { - if err = strategy.PrepareDependentObjects(node, client); err == nil { + if err = strategy.PrepareDependentObjects(ctx, node, client); err == nil { break } if !apierrors.IsConflict(err) { @@ -1232,19 +1232,19 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo return nil } -func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error { +func DoCleanupNode(ctx context.Context, client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error { var err error for attempt := 0; attempt < retries; attempt++ { var node *v1.Node - node, err = client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err = client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("skipping cleanup of Node: failed to get Node %v: %v", nodeName, err) } - updatedNode := strategy.CleanupNode(node) + updatedNode := strategy.CleanupNode(ctx, node) if apiequality.Semantic.DeepEqual(node, updatedNode) { return nil } - if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil { + if _, err = client.CoreV1().Nodes().Update(ctx, updatedNode, metav1.UpdateOptions{}); err == nil { break } if !apierrors.IsConflict(err) { @@ -1257,7 +1257,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare } for attempt := 0; attempt < retries; attempt++ { - err = strategy.CleanupDependentObjects(nodeName, client) + err = strategy.CleanupDependentObjects(ctx, nodeName, client) if err == nil { break } @@ -1272,7 +1272,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy 
Prepare return nil } -type TestPodCreateStrategy func(client clientset.Interface, namespace string, podCount int) error +type TestPodCreateStrategy func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error type CountToPodStrategy struct { Count int @@ -1304,10 +1304,10 @@ func NewTestPodCreator(client clientset.Interface, config *TestPodCreatorConfig) } } -func (c *TestPodCreator) CreatePods() error { +func (c *TestPodCreator) CreatePods(ctx context.Context) error { for ns, v := range *(c.Config) { for _, countToStrategy := range v { - if err := countToStrategy.Strategy(c.Client, ns, countToStrategy.Count); err != nil { + if err := countToStrategy.Strategy(ctx, c.Client, ns, countToStrategy.Count); err != nil { return err } } @@ -1342,7 +1342,7 @@ func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1 return nil } -func CreatePod(client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error { +func CreatePod(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error { var createError error lock := sync.Mutex{} createPodFunc := func(i int) { @@ -1354,14 +1354,14 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe } if podCount < 30 { - workqueue.ParallelizeUntil(context.TODO(), podCount, podCount, createPodFunc) + workqueue.ParallelizeUntil(ctx, podCount, podCount, createPodFunc) } else { - workqueue.ParallelizeUntil(context.TODO(), 30, podCount, createPodFunc) + workqueue.ParallelizeUntil(ctx, 30, podCount, createPodFunc) } return createError } -func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error { +func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error { var createError error lock := sync.Mutex{} createPodFunc := func(i int) { @@ -1400,7 +1400,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, } // We need to update statuses separately, as creating pv/pvc resets status to the default one. - if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil { + if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(ctx, pvc, metav1.UpdateOptions{}); err != nil { lock.Lock() defer lock.Unlock() createError = fmt.Errorf("error updating PVC status: %s", err) @@ -1414,7 +1414,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, return } // We need to update statuses separately, as creating pv/pvc resets status to the default one. 
- if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { + if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(ctx, pv, metav1.UpdateOptions{}); err != nil { lock.Lock() defer lock.Unlock() createError = fmt.Errorf("error updating PV status: %s", err) @@ -1442,9 +1442,9 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, } if count < 30 { - workqueue.ParallelizeUntil(context.TODO(), count, count, createPodFunc) + workqueue.ParallelizeUntil(ctx, count, count, createPodFunc) } else { - workqueue.ParallelizeUntil(context.TODO(), 30, count, createPodFunc) + workqueue.ParallelizeUntil(ctx, 30, count, createPodFunc) } return createError } @@ -1472,8 +1472,8 @@ func createController(client clientset.Interface, controllerName, namespace stri } func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy { - return func(client clientset.Interface, namespace string, podCount int) error { - return CreatePod(client, namespace, podCount, podTemplate) + return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error { + return CreatePod(ctx, client, namespace, podCount, podTemplate) } } @@ -1481,8 +1481,8 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy { type volumeFactory func(uniqueID int) *v1.PersistentVolume func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy { - return func(client clientset.Interface, namespace string, podCount int) error { - return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */) + return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error { + return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */) } } @@ -1501,7 +1501,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC } func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy { - return func(client clientset.Interface, namespace string, podCount int) error { + return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error { volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer storageClass := &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -1522,7 +1522,7 @@ func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFac return pv } - return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */) + return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */) } } @@ -1537,7 +1537,7 @@ func NewSimpleCreatePodStrategy() TestPodCreateStrategy { } func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCreateStrategy { - return func(client clientset.Interface, namespace string, podCount int) error { + return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error { basePod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: controllerName + "-pod-", @@ -1548,7 +1548,7 @@ func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCrea if err := 
createController(client, controllerName, namespace, podCount, basePod); err != nil { return err } - return CreatePod(client, namespace, podCount, basePod) + return CreatePod(ctx, client, namespace, podCount, basePod) } } @@ -1739,7 +1739,7 @@ type DaemonConfig struct { Timeout time.Duration } -func (config *DaemonConfig) Run() error { +func (config *DaemonConfig) Run(ctx context.Context) error { if config.Image == "" { config.Image = "registry.k8s.io/pause:3.9" } @@ -1775,7 +1775,7 @@ func (config *DaemonConfig) Run() error { var err error for i := 0; i < retries; i++ { // Wait for all daemons to be running - nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) + nodes, err = config.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err == nil { break } else if i+1 == retries { diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go index 2aceff30890e..2c18d56b4acf 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go @@ -175,8 +175,7 @@ const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernet // ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service // to specify the protocol spoken by the backend (pod) behind a listener. // If `http` (default) or `https`, an HTTPS listener that terminates the -// -// connection and parses headers is created. +// connection and parses headers is created. // // If set to `ssl` or `tcp`, a "raw" SSL listener is used. // If set to `http` and `aws-load-balancer-ssl-cert` is not used then @@ -3899,7 +3898,7 @@ func (c *Cloud) buildNLBHealthCheckConfiguration(svc *v1.Service) (healthCheckCo HealthyThreshold: defaultNlbHealthCheckThreshold, UnhealthyThreshold: defaultNlbHealthCheckThreshold, } - if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal { path, port := servicehelpers.GetServiceHealthCheckPathPort(svc) hc = healthCheckConfig{ Port: strconv.Itoa(int(port)), diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go index 79b9ef1a85d4..3f05aed7f401 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go @@ -1244,8 +1244,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // syncElbListeners computes a plan to reconcile the desired vs actual state of the listeners on an ELB // NOTE: there exists an O(nlgn) implementation for this function. However, as the default limit of -// -// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n). +// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n). 
func syncElbListeners(loadBalancerName string, listeners []*elb.Listener, listenerDescriptions []*elb.ListenerDescription) ([]*elb.Listener, []*int64) { foundSet := make(map[int]bool) removals := []*int64{} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index fe8062da98c6..a810f88bee4a 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -26,7 +26,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -36,6 +35,7 @@ import ( "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) const ( @@ -161,7 +161,7 @@ func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error { ctx, cancel := getContextWithCancel() defer cancel() - rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, to.String(sg.Etag)) + rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, pointer.StringDeref(sg.Etag, "")) klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) if rerr == nil { // Invalidate the cache right after updating @@ -219,7 +219,7 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) lb = cleanupSubnetInFrontendIPConfigurations(&lb) rgName := az.getLoadBalancerResourceGroup() - rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, to.String(lb.Name), lb, to.String(lb.Etag)) + rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, ""), lb, pointer.StringDeref(lb.Etag, "")) klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) if rerr == nil { // Invalidate the cache right after updating @@ -229,14 +229,14 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) // Invalidate the cache because ETAG precondition mismatch. if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", to.String(lb.Name)) + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(lb.Name, "")) az.lbCache.Delete(*lb.Name) } retryErrorMessage := rerr.Error().Error() // Invalidate the cache because another new operation has canceled the current request. 
if strings.Contains(strings.ToLower(retryErrorMessage), operationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", to.String(lb.Name)) + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", pointer.StringDeref(lb.Name, "")) az.lbCache.Delete(*lb.Name) } @@ -248,7 +248,7 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) return rerr.Error() } pipRG, pipName := matches[1], matches[2] - klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name)) + klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, pointer.StringDeref(lb.Name, "")) pip, _, err := az.getPublicIPAddress(pipRG, pipName) if err != nil { klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err) @@ -311,10 +311,10 @@ func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, ctx, cancel := getContextWithCancel() defer cancel() - rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, to.String(pip.Name), pip) - klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, to.String(pip.Name)) + rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, pointer.StringDeref(pip.Name, ""), pip) + klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, pointer.StringDeref(pip.Name, "")) if rerr != nil { - klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, to.String(pip.Name), rerr.Error().Error()) + klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, pointer.StringDeref(pip.Name, ""), rerr.Error().Error()) az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error()) return rerr.Error() } @@ -381,7 +381,7 @@ func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error { ctx, cancel := getContextWithCancel() defer cancel() - rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, to.String(routeTable.Etag)) + rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, pointer.StringDeref(routeTable.Etag, "")) if rerr == nil { // Invalidate the cache right after updating az.rtCache.Delete(*routeTable.Name) @@ -407,7 +407,7 @@ func (az *Cloud) CreateOrUpdateRoute(route network.Route) error { ctx, cancel := getContextWithCancel() defer cancel() - rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, to.String(route.Etag)) + rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, pointer.StringDeref(route.Etag, "")) klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) if rerr == nil { az.rtCache.Delete(az.RouteTableName) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go index 905f71c6c7b4..0cf34c7211ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go +++ 
b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go @@ -33,12 +33,12 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" azstorage "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Azure/go-autorest/autorest/to" "github.com/rubiojr/go-vhd/vhd" kwait "k8s.io/apimachinery/pkg/util/wait" volerr "k8s.io/cloud-provider/volume/errors" "k8s.io/klog/v2" + "k8s.io/utils/pointer" ) // Attention: blob disk feature is deprecated @@ -504,14 +504,14 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto Sku: &storage.Sku{Name: storageAccountType}, // switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options Kind: defaultStorageAccountKind, - Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")}, + Tags: map[string]*string{"created-by": pointer.String("azure-dd")}, Location: &location} ctx, cancel := getContextWithCancel() defer cancel() err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp) if err != nil { - return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %v", storageAccountName, err)) + return fmt.Errorf("Create Storage Account: %s, error: %v", storageAccountName, err) } newAccountState := &storageAccountState{ diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go index 0b7a5b4ca337..77309e9e2025 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go @@ -24,11 +24,11 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" + "k8s.io/utils/pointer" ) // AttachDisk attaches a vhd to vm @@ -69,7 +69,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri Caching: cachingMode, CreateOption: "attach", ManagedDisk: managedDisk, - WriteAcceleratorEnabled: to.BoolPtr(writeAcceleratorEnabled), + WriteAcceleratorEnabled: pointer.Bool(writeAcceleratorEnabled), }) } else { disks = append(disks, @@ -145,7 +145,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N if strings.EqualFold(as.cloud.Environment.Name, AzureStackCloudName) { disks = append(disks[:i], disks[i+1:]...) 
} else { - disks[i].ToBeDetached = to.BoolPtr(true) + disks[i].ToBeDetached = pointer.Bool(true) } bFoundDisk = true break diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index f932593faa49..a30520332edc 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -24,11 +24,11 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" + "k8s.io/utils/pointer" ) // AttachDisk attaches a vhd to vm @@ -71,7 +71,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod Caching: compute.CachingTypes(cachingMode), CreateOption: "attach", ManagedDisk: managedDisk, - WriteAcceleratorEnabled: to.BoolPtr(writeAcceleratorEnabled), + WriteAcceleratorEnabled: pointer.Bool(writeAcceleratorEnabled), }) } else { disks = append(disks, @@ -147,7 +147,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName if strings.EqualFold(ss.cloud.Environment.Name, AzureStackCloudName) { disks = append(disks[:i], disks[i+1:]...) } else { - disks[i].ToBeDetached = to.BoolPtr(true) + disks[i].ToBeDetached = pointer.Bool(true) } bFoundDisk = true break diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 13246826f69b..5c82ef14c8e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -30,7 +30,6 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -42,6 +41,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" utilnet "k8s.io/utils/net" + "k8s.io/utils/pointer" ) const ( @@ -205,7 +205,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser return nil, err } - updateService := updateServiceLoadBalancerIP(service, to.String(serviceIP)) + updateService := updateServiceLoadBalancerIP(service, pointer.StringDeref(serviceIP, "")) flippedService := flipServiceInternalAnnotation(updateService) if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil { klog.Errorf("reconcileLoadBalancer(%s) failed: %#v", serviceName, err) @@ -214,7 +214,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser // lb is not reused here because the ETAG may be changed in above operations, hence reconcilePublicIP() would get lb again from cache. 
klog.V(2).Infof("EnsureLoadBalancer: reconciling pip") - if _, err := az.reconcilePublicIP(clusterName, updateService, to.String(lb.Name), true /* wantLb */); err != nil { + if _, err := az.reconcilePublicIP(clusterName, updateService, pointer.StringDeref(lb.Name, ""), true /* wantLb */); err != nil { klog.Errorf("reconcilePublicIP(%s) failed: %#v", serviceName, err) return nil, err } @@ -294,19 +294,19 @@ func (az *Cloud) getLoadBalancerResourceGroup() string { func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, service *v1.Service, clusterName string) (*network.LoadBalancer, error) { lbBackendPoolName := getBackendPoolName(clusterName, service) lbResourceGroup := az.getLoadBalancerResourceGroup() - lbBackendPoolID := az.getBackendPoolID(to.String(primarySLB.Name), lbResourceGroup, lbBackendPoolName) + lbBackendPoolID := az.getBackendPoolID(pointer.StringDeref(primarySLB.Name, ""), lbResourceGroup, lbBackendPoolName) newBackendPools := make([]network.BackendAddressPool, 0) if primarySLB.LoadBalancerPropertiesFormat != nil && primarySLB.BackendAddressPools != nil { newBackendPools = *primarySLB.BackendAddressPools } vmSetNameToBackendIPConfigurationsToBeDeleted := make(map[string][]network.InterfaceIPConfiguration) for j, bp := range newBackendPools { - if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) { - klog.V(2).Infof("cleanBackendpoolForPrimarySLB: checking the backend pool %s from standard load balancer %s", to.String(bp.Name), to.String(primarySLB.Name)) + if strings.EqualFold(pointer.StringDeref(bp.Name, ""), lbBackendPoolName) { + klog.V(2).Infof("cleanBackendpoolForPrimarySLB: checking the backend pool %s from standard load balancer %s", pointer.StringDeref(bp.Name, ""), pointer.StringDeref(primarySLB.Name, "")) if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil { for i := len(*bp.BackendIPConfigurations) - 1; i >= 0; i-- { ipConf := (*bp.BackendIPConfigurations)[i] - ipConfigID := to.String(ipConf.ID) + ipConfigID := pointer.StringDeref(ipConf.ID, "") _, vmSetName, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfigID) if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { return nil, err @@ -316,7 +316,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, klog.V(2).Infof("cleanBackendpoolForPrimarySLB: found unwanted vmSet %s, decouple it from the LB", vmSetName) // construct a backendPool that only contains the IP config of the node to be deleted interfaceIPConfigToBeDeleted := network.InterfaceIPConfiguration{ - ID: to.StringPtr(ipConfigID), + ID: pointer.String(ipConfigID), } vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName] = append(vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName], interfaceIPConfigToBeDeleted) *bp.BackendIPConfigurations = append((*bp.BackendIPConfigurations)[:i], (*bp.BackendIPConfigurations)[i+1:]...) 
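The Azure provider hunks in these files all apply the same mechanical substitution of the deprecated autorest "to" helpers with k8s.io/utils/pointer. A standalone sketch of the replacement calls (the values are made up):

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// pointer.String replaces to.StringPtr: build a *string from a literal.
	name := pointer.String("my-lb")

	// pointer.StringDeref(p, "") replaces to.String(p): dereference with a
	// caller-supplied default instead of relying on to's nil handling.
	var etag *string
	fmt.Println(pointer.StringDeref(name, ""))     // my-lb
	fmt.Println(pointer.StringDeref(etag, "none")) // none
	fmt.Println(pointer.BoolDeref(nil, false))     // false, like to.Bool(nil)
}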
@@ -330,7 +330,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, for vmSetName, backendIPConfigurationsToBeDeleted := range vmSetNameToBackendIPConfigurationsToBeDeleted { backendpoolToBeDeleted := &[]network.BackendAddressPool{ { - ID: to.StringPtr(lbBackendPoolID), + ID: pointer.String(lbBackendPoolID), BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{ BackendIPConfigurations: &backendIPConfigurationsToBeDeleted, }, @@ -366,7 +366,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, // check if the service already has a load balancer for i := range existingLBs { existingLB := existingLBs[i] - if strings.EqualFold(to.String(existingLB.Name), clusterName) && useMultipleSLBs { + if strings.EqualFold(pointer.StringDeref(existingLB.Name, ""), clusterName) && useMultipleSLBs { cleanedLB, err := az.cleanBackendpoolForPrimarySLB(&existingLB, service, clusterName) if err != nil { return nil, nil, false, err @@ -509,10 +509,10 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L for _, ipConfiguration := range *lb.FrontendIPConfigurations { owns, isPrimaryService, err := az.serviceOwnsFrontendIP(ipConfiguration, service) if err != nil { - return nil, fmt.Errorf("get(%s): lb(%s) - failed to filter frontend IP configs with error: %v", serviceName, to.String(lb.Name), err) + return nil, fmt.Errorf("get(%s): lb(%s) - failed to filter frontend IP configs with error: %v", serviceName, pointer.StringDeref(lb.Name, ""), err) } if owns { - klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, to.String(lb.Name), isPrimaryService) + klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, pointer.StringDeref(lb.Name, ""), isPrimaryService) var lbIP *string if isInternal { @@ -538,8 +538,8 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L } } - klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), to.String(ipConfiguration.Name), serviceName) - return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil + klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", pointer.StringDeref(lbIP, ""), pointer.StringDeref(ipConfiguration.Name, ""), serviceName) + return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: pointer.StringDeref(lbIP, "")}}}, nil } } @@ -701,14 +701,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai if shouldPIPExisted { return nil, fmt.Errorf("PublicIP from annotation azure-pip-name=%s for service %s doesn't exist", pipName, serviceName) } - pip.Name = to.StringPtr(pipName) - pip.Location = to.StringPtr(az.Location) + pip.Name = pointer.String(pipName) + pip.Location = pointer.String(az.Location) pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{ PublicIPAllocationMethod: network.Static, IPTags: getServiceIPTagRequestForPublicIP(service).IPTags, } pip.Tags = map[string]*string{ - serviceTagKey: to.StringPtr(""), + serviceTagKey: pointer.String(""), clusterNameKey: &clusterName, } if _, err = bindServicesToPIP(&pip, []string{serviceName}, false); err != nil { @@ -724,7 +724,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } if foundDNSLabelAnnotation { if existingServiceName, 
ok := pip.Tags[serviceUsingDNSKey]; ok { - if !strings.EqualFold(to.String(existingServiceName), serviceName) { + if !strings.EqualFold(pointer.StringDeref(existingServiceName, ""), serviceName) { return nil, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing service %s consuming the DNS label on the public IP, so the service cannot set the DNS label annotation with this value", serviceName, pipName, *existingServiceName) } } @@ -740,7 +740,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } else { existingDNSLabel := pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel - if !strings.EqualFold(to.String(existingDNSLabel), domainNameLabel) { + if !strings.EqualFold(pointer.StringDeref(existingDNSLabel, ""), domainNameLabel) { return nil, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing DNS label %s on the public IP", serviceName, pipName, *existingDNSLabel) } } @@ -827,8 +827,8 @@ func sortIPTags(ipTags *[]network.IPTag) { if ipTags != nil { sort.Slice(*ipTags, func(i, j int) bool { ipTag := *ipTags - return to.String(ipTag[i].IPTagType) < to.String(ipTag[j].IPTagType) || - to.String(ipTag[i].Tag) < to.String(ipTag[j].Tag) + return pointer.StringDeref(ipTag[i].IPTagType, "") < pointer.StringDeref(ipTag[j].IPTagType, "") || + pointer.StringDeref(ipTag[i].Tag, "") < pointer.StringDeref(ipTag[j].Tag, "") }) } } @@ -860,8 +860,8 @@ func convertIPTagMapToSlice(ipTagMap map[string]string) *[]network.IPTag { outputTags := []network.IPTag{} for k, v := range ipTagMap { ipTag := network.IPTag{ - IPTagType: to.StringPtr(k), - Tag: to.StringPtr(v), + IPTagType: pointer.String(k), + Tag: pointer.String(v), } outputTags = append(outputTags, ipTag) } @@ -873,7 +873,7 @@ func getDomainNameLabel(pip *network.PublicIPAddress) string { if pip == nil || pip.PublicIPAddressPropertiesFormat == nil || pip.PublicIPAddressPropertiesFormat.DNSSettings == nil { return "" } - return to.String(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel) + return pointer.StringDeref(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel, "") } func getIdleTimeout(s *v1.Service) (*int32, error) { @@ -906,10 +906,10 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend if err != nil { return false, err } - if isServiceOwnsFrontendIP && isPrimaryService && !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) { + if isServiceOwnsFrontendIP && isPrimaryService && !strings.EqualFold(pointer.StringDeref(config.Name, ""), lbFrontendIPConfigName) { return true, nil } - if !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) { + if !strings.EqualFold(pointer.StringDeref(config.Name, ""), lbFrontendIPConfigName) { return false, nil } loadBalancerIP := service.Spec.LoadBalancerIP @@ -925,14 +925,14 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend if !existsSubnet { return false, fmt.Errorf("failed to get subnet") } - if config.Subnet != nil && !strings.EqualFold(to.String(config.Subnet.Name), to.String(subnet.Name)) { + if config.Subnet != nil && !strings.EqualFold(pointer.StringDeref(config.Subnet.Name, ""), pointer.StringDeref(subnet.Name, "")) { return true, nil } } if loadBalancerIP == "" { return config.PrivateIPAllocationMethod == network.Static, nil } - return config.PrivateIPAllocationMethod != network.Static || !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil + return 
config.PrivateIPAllocationMethod != network.Static || !strings.EqualFold(loadBalancerIP, pointer.StringDeref(config.PrivateIPAddress, "")), nil } pipName, _, err := az.determinePublicIPName(clusterName, service) if err != nil { @@ -946,7 +946,7 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend if !existsPip { return true, nil } - return config.PublicIPAddress != nil && !strings.EqualFold(to.String(pip.ID), to.String(config.PublicIPAddress.ID)), nil + return config.PublicIPAddress != nil && !strings.EqualFold(pointer.StringDeref(pip.ID, ""), pointer.StringDeref(config.PublicIPAddress.ID, "")), nil } // isFrontendIPConfigUnsafeToDelete checks if a frontend IP config is safe to be deleted. @@ -1122,7 +1122,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, var backendIPConfigurationsToBeDeleted []network.InterfaceIPConfiguration if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil { for _, ipConf := range *bp.BackendIPConfigurations { - ipConfID := to.String(ipConf.ID) + ipConfID := pointer.StringDeref(ipConf.ID, "") nodeName, _, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfID) if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { return nil, err @@ -1137,13 +1137,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if shouldExcludeLoadBalancer { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName) // construct a backendPool that only contains the IP config of the node to be deleted - backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)}) + backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: pointer.String(ipConfID)}) } } if len(backendIPConfigurationsToBeDeleted) > 0 { backendpoolToBeDeleted := &[]network.BackendAddressPool{ { - ID: to.StringPtr(lbBackendPoolID), + ID: pointer.String(lbBackendPoolID), BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{ BackendIPConfigurations: &backendIPConfigurationsToBeDeleted, }, @@ -1172,7 +1172,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } newBackendPools = append(newBackendPools, network.BackendAddressPool{ - Name: to.StringPtr(lbBackendPoolName), + Name: pointer.String(lbBackendPoolName), }) klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) @@ -1295,8 +1295,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, newConfigs = append(newConfigs, network.FrontendIPConfiguration{ - Name: to.StringPtr(defaultLBFrontendIPConfigName), - ID: to.StringPtr(fmt.Sprintf(frontendIPConfigIDTemplate, az.SubscriptionID, az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)), + Name: pointer.String(defaultLBFrontendIPConfigName), + ID: pointer.String(fmt.Sprintf(frontendIPConfigIDTemplate, az.SubscriptionID, az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)), FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, }) klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, defaultLBFrontendIPConfigName) @@ -1450,7 +1450,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, foundLB := 
false for _, existingLB := range existingLBs { - if strings.EqualFold(lbName, to.String(existingLB.Name)) { + if strings.EqualFold(lbName, pointer.StringDeref(existingLB.Name, "")) { foundLB = true break } @@ -1627,7 +1627,7 @@ func (az *Cloud) reconcileLoadBalancerRule( var enableTCPReset *bool if az.useStandardLoadBalancer() { - enableTCPReset = to.BoolPtr(true) + enableTCPReset = pointer.Bool(true) } var expectedProbes []network.Probe @@ -1662,11 +1662,11 @@ func (az *Cloud) reconcileLoadBalancerRule( expectedProbes = append(expectedProbes, network.Probe{ Name: &lbRuleName, ProbePropertiesFormat: &network.ProbePropertiesFormat{ - RequestPath: to.StringPtr(requestPath), + RequestPath: pointer.String(requestPath), Protocol: network.ProbeProtocol(probeProtocol), - Port: to.Int32Ptr(podPresencePort), - IntervalInSeconds: to.Int32Ptr(5), - NumberOfProbes: to.Int32Ptr(2), + Port: pointer.Int32(podPresencePort), + IntervalInSeconds: pointer.Int32(5), + NumberOfProbes: pointer.Int32(2), }, }) } else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP { @@ -1677,9 +1677,9 @@ func (az *Cloud) reconcileLoadBalancerRule( var actualPath *string if !strings.EqualFold(probeProtocol, string(network.ProbeProtocolTCP)) { if requestPath != "" { - actualPath = to.StringPtr(requestPath) + actualPath = pointer.String(requestPath) } else { - actualPath = to.StringPtr("/healthz") + actualPath = pointer.String("/healthz") } } expectedProbes = append(expectedProbes, network.Probe{ @@ -1687,9 +1687,9 @@ func (az *Cloud) reconcileLoadBalancerRule( ProbePropertiesFormat: &network.ProbePropertiesFormat{ Protocol: network.ProbeProtocol(probeProtocol), RequestPath: actualPath, - Port: to.Int32Ptr(port.NodePort), - IntervalInSeconds: to.Int32Ptr(5), - NumberOfProbes: to.Int32Ptr(2), + Port: pointer.Int32(port.NodePort), + IntervalInSeconds: pointer.Int32(5), + NumberOfProbes: pointer.Int32(2), }, }) } @@ -1708,17 +1708,17 @@ func (az *Cloud) reconcileLoadBalancerRule( LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ Protocol: *transportProto, FrontendIPConfiguration: &network.SubResource{ - ID: to.StringPtr(lbFrontendIPConfigID), + ID: pointer.String(lbFrontendIPConfigID), }, BackendAddressPool: &network.SubResource{ - ID: to.StringPtr(lbBackendPoolID), + ID: pointer.String(lbBackendPoolID), }, LoadDistribution: loadDistribution, - FrontendPort: to.Int32Ptr(port.Port), - BackendPort: to.Int32Ptr(port.Port), - DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()), + FrontendPort: pointer.Int32(port.Port), + BackendPort: pointer.Int32(port.Port), + DisableOutboundSnat: pointer.Bool(az.disableLoadBalancerOutboundSNAT()), EnableTCPReset: tcpReset, - EnableFloatingIP: to.BoolPtr(true), + EnableFloatingIP: pointer.Bool(true), }, } @@ -1729,8 +1729,8 @@ func (az *Cloud) reconcileLoadBalancerRule( if requiresInternalLoadBalancer(service) && strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard) && strings.EqualFold(service.Annotations[ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts], "true") { - expectedRule.FrontendPort = to.Int32Ptr(0) - expectedRule.BackendPort = to.Int32Ptr(0) + expectedRule.FrontendPort = pointer.Int32(0) + expectedRule.BackendPort = pointer.Int32(0) expectedRule.Protocol = network.TransportProtocolAll highAvailabilityPortsEnabled = true } @@ -1739,7 +1739,7 @@ func (az *Cloud) reconcileLoadBalancerRule( // However, when externalTrafficPolicy is Local, Kubernetes HTTP health check would be used for probing. 
if servicehelpers.NeedsHealthCheck(service) || (port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP) { expectedRule.Probe = &network.SubResource{ - ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), lbRuleName)), + ID: pointer.String(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), lbRuleName)), } } @@ -1817,13 +1817,13 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, ix := i*len(sourceAddressPrefixes) + j securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j]) expectedSecurityRules[ix] = network.SecurityRule{ - Name: to.StringPtr(securityRuleName), + Name: pointer.String(securityRuleName), SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ Protocol: *securityProto, - SourcePortRange: to.StringPtr("*"), - DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), - SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), - DestinationAddressPrefix: to.StringPtr(destinationIPAddress), + SourcePortRange: pointer.String("*"), + DestinationPortRange: pointer.String(strconv.Itoa(int(port.Port))), + SourceAddressPrefix: pointer.String(sourceAddressPrefixes[j]), + DestinationAddressPrefix: pointer.String(destinationIPAddress), Access: network.SecurityRuleAccessAllow, Direction: network.SecurityRuleDirectionInbound, }, @@ -1929,7 +1929,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return nil, err } - expectedRule.Priority = to.Int32Ptr(nextAvailablePriority) + expectedRule.Priority = pointer.Int32(nextAvailablePriority) updatedRules = append(updatedRules, expectedRule) dirtySg = true } @@ -1954,7 +1954,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return nil, err } klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): end", *sg.Name) - az.nsgCache.Delete(to.String(sg.Name)) + az.nsgCache.Delete(pointer.StringDeref(sg.Name, "")) } return &sg, nil } @@ -2000,13 +2000,13 @@ func findIndex(strs []string, s string) (int, bool) { } func allowsConsolidation(rule network.SecurityRule) bool { - return strings.HasPrefix(to.String(rule.Name), "shared") + return strings.HasPrefix(pointer.StringDeref(rule.Name, ""), "shared") } func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) { for index, r := range rules { if allowsConsolidation(r) { - if strings.EqualFold(to.String(r.Name), to.String(rule.Name)) { + if strings.EqualFold(pointer.StringDeref(r.Name, ""), pointer.StringDeref(rule.Name, "")) { return index, true } } @@ -2320,12 +2320,12 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string loadBalancerRuleUpdated := false // Check whether there are still frontend IP configurations referring to it. 
- ipConfigurationID := to.String(pip.PublicIPAddressPropertiesFormat.IPConfiguration.ID) + ipConfigurationID := pointer.StringDeref(pip.PublicIPAddressPropertiesFormat.IPConfiguration.ID, "") if ipConfigurationID != "" { lbFrontendIPConfigs := *lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations for i := len(lbFrontendIPConfigs) - 1; i >= 0; i-- { config := lbFrontendIPConfigs[i] - if strings.EqualFold(ipConfigurationID, to.String(config.ID)) { + if strings.EqualFold(ipConfigurationID, pointer.StringDeref(config.ID, "")) { if config.FrontendIPConfigurationPropertiesFormat != nil && config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules != nil { referencedLBRules = *config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules @@ -2346,13 +2346,13 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string if len(referencedLBRules) > 0 { referencedLBRuleIDs := sets.NewString() for _, refer := range referencedLBRules { - referencedLBRuleIDs.Insert(to.String(refer.ID)) + referencedLBRuleIDs.Insert(pointer.StringDeref(refer.ID, "")) } if lb.LoadBalancerPropertiesFormat.LoadBalancingRules != nil { lbRules := *lb.LoadBalancerPropertiesFormat.LoadBalancingRules for i := len(lbRules) - 1; i >= 0; i-- { - ruleID := to.String(lbRules[i].ID) + ruleID := pointer.StringDeref(lbRules[i].ID, "") if ruleID != "" && referencedLBRuleIDs.Has(ruleID) { loadBalancerRuleUpdated = true lbRules = append(lbRules[:i], lbRules[i+1:]...) @@ -2375,7 +2375,7 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string } } - pipName := to.String(pip.Name) + pipName := pointer.StringDeref(pip.Name, "") klog.V(10).Infof("DeletePublicIP(%s, %q): start", pipResourceGroup, pipName) err := az.DeletePublicIP(service, pipResourceGroup, pipName) if err != nil { @@ -2388,7 +2388,7 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string func findProbe(probes []network.Probe, probe network.Probe) bool { for _, existingProbe := range probes { - if strings.EqualFold(to.String(existingProbe.Name), to.String(probe.Name)) && to.Int32(existingProbe.Port) == to.Int32(probe.Port) { + if strings.EqualFold(pointer.StringDeref(existingProbe.Name, ""), pointer.StringDeref(probe.Name, "")) && pointer.Int32Deref(existingProbe.Port, 0) == pointer.Int32Deref(probe.Port, 0) { return true } } @@ -2397,7 +2397,7 @@ func findProbe(probes []network.Probe, probe network.Probe) bool { func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule, wantLB bool) bool { for _, existingRule := range rules { - if strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) && + if strings.EqualFold(pointer.StringDeref(existingRule.Name, ""), pointer.StringDeref(rule.Name, "")) && equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat, wantLB) { return true } @@ -2418,7 +2418,7 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti } if reflect.DeepEqual(s.Protocol, network.TransportProtocolTCP) { - properties = properties && reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset)) + properties = properties && reflect.DeepEqual(pointer.BoolDeref(s.EnableTCPReset, false), pointer.BoolDeref(t.EnableTCPReset, false)) } properties = properties && reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) && @@ -2427,7 +2427,7 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti 
reflect.DeepEqual(s.FrontendPort, t.FrontendPort) && reflect.DeepEqual(s.BackendPort, t.BackendPort) && reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) && - reflect.DeepEqual(to.Bool(s.DisableOutboundSnat), to.Bool(t.DisableOutboundSnat)) + reflect.DeepEqual(pointer.BoolDeref(s.DisableOutboundSnat, false), pointer.BoolDeref(t.DisableOutboundSnat, false)) if wantLB && s.IdleTimeoutInMinutes != nil && t.IdleTimeoutInMinutes != nil { return properties && reflect.DeepEqual(s.IdleTimeoutInMinutes, t.IdleTimeoutInMinutes) @@ -2441,23 +2441,23 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti // despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules. func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool { for _, existingRule := range rules { - if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) { + if !strings.EqualFold(pointer.StringDeref(existingRule.Name, ""), pointer.StringDeref(rule.Name, "")) { continue } if !strings.EqualFold(string(existingRule.Protocol), string(rule.Protocol)) { continue } - if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) { + if !strings.EqualFold(pointer.StringDeref(existingRule.SourcePortRange, ""), pointer.StringDeref(rule.SourcePortRange, "")) { continue } - if !strings.EqualFold(to.String(existingRule.DestinationPortRange), to.String(rule.DestinationPortRange)) { + if !strings.EqualFold(pointer.StringDeref(existingRule.DestinationPortRange, ""), pointer.StringDeref(rule.DestinationPortRange, "")) { continue } - if !strings.EqualFold(to.String(existingRule.SourceAddressPrefix), to.String(rule.SourceAddressPrefix)) { + if !strings.EqualFold(pointer.StringDeref(existingRule.SourceAddressPrefix, ""), pointer.StringDeref(rule.SourceAddressPrefix, "")) { continue } if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) { - if !strings.EqualFold(to.String(existingRule.DestinationAddressPrefix), to.String(rule.DestinationAddressPrefix)) { + if !strings.EqualFold(pointer.StringDeref(existingRule.DestinationAddressPrefix, ""), pointer.StringDeref(rule.DestinationAddressPrefix, "")) { continue } } @@ -2583,7 +2583,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus return false, false } - if pip.PublicIPAddressPropertiesFormat == nil || to.String(pip.IPAddress) == "" { + if pip.PublicIPAddressPropertiesFormat == nil || pointer.StringDeref(pip.IPAddress, "") == "" { klog.Warningf("serviceOwnsPublicIP: empty pip.IPAddress") return false, false } @@ -2595,8 +2595,8 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus clusterTag := pip.Tags[clusterNameKey] // if there is no service tag on the pip, it is user-created pip - if to.String(serviceTag) == "" { - return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), true + if pointer.StringDeref(serviceTag, "") == "" { + return strings.EqualFold(pointer.StringDeref(pip.IPAddress, ""), service.Spec.LoadBalancerIP), true } if serviceTag != nil { @@ -2615,7 +2615,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus } else { // if the service is not included in te tags of the system-created pip, check the ip address // this could happen for secondary services - return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false + return strings.EqualFold(pointer.StringDeref(pip.IPAddress, ""), 
service.Spec.LoadBalancerIP), false } } } @@ -2663,7 +2663,7 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri } if pip.Tags == nil { - pip.Tags = map[string]*string{serviceTagKey: to.StringPtr("")} + pip.Tags = map[string]*string{serviceTagKey: pointer.String("")} } serviceTagValue := pip.Tags[serviceTagKey] @@ -2673,7 +2673,7 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri // replace is used when unbinding the service from PIP so addedNew remains false all the time if replace { - serviceTagValue = to.StringPtr(strings.Join(incomingServiceNames, ",")) + serviceTagValue = pointer.String(strings.Join(incomingServiceNames, ",")) pip.Tags[serviceTagKey] = serviceTagValue return false, nil @@ -2687,7 +2687,7 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri for _, serviceName := range incomingServiceNames { if serviceTagValue == nil || *serviceTagValue == "" { - serviceTagValue = to.StringPtr(serviceName) + serviceTagValue = pointer.String(serviceName) addedNew = true } else { // detect duplicates @@ -2729,7 +2729,7 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, serviceName string) erro if existingServiceName, ok := pip.Tags[serviceUsingDNSKey]; ok { if strings.EqualFold(*existingServiceName, serviceName) { - pip.Tags[serviceUsingDNSKey] = to.StringPtr("") + pip.Tags[serviceUsingDNSKey] = pointer.String("") } } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index 9eb6ba2acc80..b31f73203356 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -28,7 +28,6 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" - "github.com/Azure/go-autorest/autorest/to" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -36,6 +35,7 @@ import ( cloudvolume "k8s.io/cloud-provider/volume" volumehelpers "k8s.io/cloud-provider/volume/helpers" "k8s.io/klog/v2" + "k8s.io/utils/pointer" ) const ( @@ -128,7 +128,7 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( } diskIOPSReadWrite = int64(v) } - diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite) + diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) diskMBpsReadWrite := int64(defaultDiskMBpsReadWrite) if options.DiskMBpsReadWrite != "" { @@ -138,7 +138,7 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( } diskMBpsReadWrite = int64(v) } - diskProperties.DiskMBpsReadWrite = to.Int64Ptr(diskMBpsReadWrite) + diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) } else { if options.DiskIOPSReadWrite != "" { return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type") @@ -308,7 +308,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan } if result.DiskProperties.DiskState != compute.Unattached { - return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, to.String(result.ManagedBy)) + return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current 
disk state: %s, already attached to %s", result.DiskProperties.DiskState, pointer.StringDeref(result.ManagedBy, "")) } diskParameter := compute.DiskUpdate{ diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go index 900160ff4cc1..f26dfef449f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go @@ -27,7 +27,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -36,6 +35,7 @@ import ( azcache "k8s.io/legacy-cloud-providers/azure/cache" "k8s.io/legacy-cloud-providers/azure/metrics" utilnet "k8s.io/utils/net" + "k8s.io/utils/pointer" ) var ( @@ -173,13 +173,13 @@ func (d *delayedRouteUpdater) updateRoutes() { routeMatch := false onlyUpdateTags = false for i, existingRoute := range routes { - if strings.EqualFold(to.String(existingRoute.Name), to.String(rt.route.Name)) { + if strings.EqualFold(pointer.StringDeref(existingRoute.Name, ""), pointer.StringDeref(rt.route.Name, "")) { // delete the name-matched routes here (missing routes would be added later if the operation is add). routes = append(routes[:i], routes[i+1:]...) if existingRoute.RoutePropertiesFormat != nil && rt.route.RoutePropertiesFormat != nil && - strings.EqualFold(to.String(existingRoute.AddressPrefix), to.String(rt.route.AddressPrefix)) && - strings.EqualFold(to.String(existingRoute.NextHopIPAddress), to.String(rt.route.NextHopIPAddress)) { + strings.EqualFold(pointer.StringDeref(existingRoute.AddressPrefix, ""), pointer.StringDeref(rt.route.AddressPrefix, "")) && + strings.EqualFold(pointer.StringDeref(existingRoute.NextHopIPAddress, ""), pointer.StringDeref(rt.route.NextHopIPAddress, "")) { routeMatch = true } if rt.operation == routeOperationDelete { @@ -216,7 +216,7 @@ func (d *delayedRouteUpdater) updateRoutes() { // and deletes all dualstack routes when dualstack is not enabled. 
func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []network.Route) (routes []network.Route, changed bool) { for i := len(existingRoutes) - 1; i >= 0; i-- { - existingRouteName := to.String(existingRoutes[i].Name) + existingRouteName := pointer.StringDeref(existingRoutes[i].Name, "") split := strings.Split(existingRouteName, routeNameSeparator) klog.V(4).Infof("cleanupOutdatedRoutes: checking route %s", existingRouteName) @@ -299,7 +299,7 @@ func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr // ensure the route table is tagged as configured tags, changed := az.ensureRouteTableTagged(&routeTable) if changed { - klog.V(2).Infof("ListRoutes: updating tags on route table %s", to.String(routeTable.Name)) + klog.V(2).Infof("ListRoutes: updating tags on route table %s", pointer.StringDeref(routeTable.Name, "")) op, err := az.routeUpdater.addUpdateRouteTableTagsOperation(routeTableOperationUpdateTags, tags) if err != nil { klog.Errorf("ListRoutes: failed to add route table operation with error: %v", err) @@ -348,8 +348,8 @@ func processRoutes(ipv6DualStackEnabled bool, routeTable network.RouteTable, exi func (az *Cloud) createRouteTable() error { routeTable := network.RouteTable{ - Name: to.StringPtr(az.RouteTableName), - Location: to.StringPtr(az.Location), + Name: pointer.String(az.RouteTableName), + Location: pointer.String(az.Location), RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{}, } @@ -420,11 +420,11 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s } routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) route := network.Route{ - Name: to.StringPtr(routeName), + Name: pointer.String(routeName), RoutePropertiesFormat: &network.RoutePropertiesFormat{ - AddressPrefix: to.StringPtr(kubeRoute.DestinationCIDR), + AddressPrefix: pointer.String(kubeRoute.DestinationCIDR), NextHopType: network.RouteNextHopTypeVirtualAppliance, - NextHopIPAddress: to.StringPtr(targetIP), + NextHopIPAddress: pointer.String(targetIP), }, } @@ -474,7 +474,7 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR)) klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeName) route := network.Route{ - Name: to.StringPtr(routeName), + Name: pointer.String(routeName), RoutePropertiesFormat: &network.RoutePropertiesFormat{}, } op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route) @@ -495,7 +495,7 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute routeNameWithoutIPV6Suffix := strings.Split(routeName, routeNameSeparator)[0] klog.V(2).Infof("DeleteRoute: deleting route. 
clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeNameWithoutIPV6Suffix) route := network.Route{ - Name: to.StringPtr(routeNameWithoutIPV6Suffix), + Name: pointer.String(routeNameWithoutIPV6Suffix), RoutePropertiesFormat: &network.RoutePropertiesFormat{}, } op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route) diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go index f0f0c5fad702..d439df3fe43c 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -31,7 +31,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -43,6 +42,7 @@ import ( azcache "k8s.io/legacy-cloud-providers/azure/cache" "k8s.io/legacy-cloud-providers/azure/metrics" utilnet "k8s.io/utils/net" + "k8s.io/utils/pointer" ) const ( @@ -216,7 +216,7 @@ func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) { } for _, ref := range *machine.NetworkProfile.NetworkInterfaces { - if to.Bool(ref.Primary) { + if pointer.BoolDeref(ref.Primary, false) { return *ref.ID, nil } } @@ -259,7 +259,7 @@ func getIPConfigByIPFamily(nic network.Interface, IPv6 bool) (*network.Interface return &ref, nil } } - return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", IPv6, to.String(nic.Name)) + return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). 
nicname=%q", IPv6, pointer.StringDeref(nic.Name, "")) } func isInternalLoadBalancer(lb *network.LoadBalancer) bool { @@ -342,7 +342,7 @@ func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool { func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) (bool, bool, error) { var isPrimaryService bool baseName := az.GetLoadBalancerName(context.TODO(), "", service) - if strings.HasPrefix(to.String(fip.Name), baseName) { + if strings.HasPrefix(pointer.StringDeref(fip.Name, ""), baseName) { klog.V(6).Infof("serviceOwnsFrontendIP: found primary service %s of the "+ "frontend IP config %s", service.Name, *fip.Name) isPrimaryService = true @@ -369,7 +369,7 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv pip.IPAddress != nil && fip.FrontendIPConfigurationPropertiesFormat != nil && fip.FrontendIPConfigurationPropertiesFormat.PublicIPAddress != nil { - if strings.EqualFold(to.String(pip.ID), to.String(fip.PublicIPAddress.ID)) { + if strings.EqualFold(pointer.StringDeref(pip.ID, ""), pointer.StringDeref(fip.PublicIPAddress.ID, "")) { klog.V(4).Infof("serviceOwnsFrontendIP: found secondary service %s of the frontend IP config %s", service.Name, *fip.Name) return true, isPrimaryService, nil @@ -491,7 +491,7 @@ func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState str if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { statuses := *vm.InstanceView.Statuses for _, status := range statuses { - state := to.String(status.Code) + state := pointer.StringDeref(status.Code, "") if strings.HasPrefix(state, vmPowerStatePrefix) { return strings.TrimPrefix(state, vmPowerStatePrefix), nil } @@ -514,7 +514,7 @@ func (as *availabilitySet) GetProvisioningStateByNodeName(name string) (provisio return provisioningState, nil } - return to.String(vm.VirtualMachineProperties.ProvisioningState), nil + return pointer.StringDeref(vm.VirtualMachineProperties.ProvisioningState, ""), nil } // GetNodeNameByProviderID gets the node name by provider ID. @@ -556,15 +556,15 @@ func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, e return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err) } - failureDomain = as.makeZone(to.String(vm.Location), zoneID) + failureDomain = as.makeZone(pointer.StringDeref(vm.Location, ""), zoneID) } else { // Availability zone is not used for the node, falling back to fault domain. - failureDomain = strconv.Itoa(int(to.Int32(vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))) + failureDomain = strconv.Itoa(int(pointer.Int32Deref(vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain, 0))) } zone := cloudprovider.Zone{ FailureDomain: strings.ToLower(failureDomain), - Region: strings.ToLower(to.String(vm.Location)), + Region: strings.ToLower(pointer.StringDeref(vm.Location, "")), } return zone, nil } @@ -805,7 +805,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri var availabilitySetID string if machine.VirtualMachineProperties != nil && machine.AvailabilitySet != nil { - availabilitySetID = to.String(machine.AvailabilitySet.ID) + availabilitySetID = pointer.StringDeref(machine.AvailabilitySet.ID, "") } return nic, availabilitySetID, nil } @@ -882,7 +882,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types. 
newBackendPools = append(newBackendPools, network.BackendAddressPool{ - ID: to.StringPtr(backendPoolID), + ID: pointer.String(backendPoolID), }) primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools @@ -958,7 +958,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend ipConfigurationIDs := []string{} for _, backendPool := range *backendAddressPools { - if strings.EqualFold(to.String(backendPool.ID), backendPoolID) && + if strings.EqualFold(pointer.StringDeref(backendPool.ID, ""), backendPoolID) && backendPool.BackendAddressPoolPropertiesFormat != nil && backendPool.BackendIPConfigurations != nil { for _, ipConf := range *backendPool.BackendIPConfigurations { @@ -1013,7 +1013,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend if nic.InterfacePropertiesFormat != nil && nic.InterfacePropertiesFormat.IPConfigurations != nil { newIPConfigs := *nic.IPConfigurations for j, ipConf := range newIPConfigs { - if !to.Bool(ipConf.Primary) { + if !pointer.BoolDeref(ipConf.Primary, false) { continue } // found primary ip configuration @@ -1021,7 +1021,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend newLBAddressPools := *ipConf.LoadBalancerBackendAddressPools for k := len(newLBAddressPools) - 1; k >= 0; k-- { pool := newLBAddressPools[k] - if strings.EqualFold(to.String(pool.ID), backendPoolID) { + if strings.EqualFold(pointer.StringDeref(pool.ID, ""), backendPoolID) { newLBAddressPools = append(newLBAddressPools[:k], newLBAddressPools[k+1:]...) break } @@ -1033,10 +1033,10 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend nicUpdaters = append(nicUpdaters, func() error { ctx, cancel := getContextWithCancel() defer cancel() - klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolID %s", as.resourceGroup, to.String(nic.Name), backendPoolID) - rerr := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, to.String(nic.Name), nic) + klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolID %s", as.resourceGroup, pointer.StringDeref(nic.Name, ""), backendPoolID) + rerr := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, pointer.StringDeref(nic.Name, ""), nic) if rerr != nil { - klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", as.resourceGroup, to.String(nic.Name), rerr.Error()) + klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", as.resourceGroup, pointer.StringDeref(nic.Name, ""), rerr.Error()) return rerr.Error() } return nil @@ -1098,7 +1098,7 @@ func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID stri } vmID := "" if nic.InterfacePropertiesFormat != nil && nic.VirtualMachine != nil { - vmID = to.String(nic.VirtualMachine.ID) + vmID = pointer.StringDeref(nic.VirtualMachine.ID, "") } if vmID == "" { klog.V(2).Infof("GetNodeNameByIPConfigurationID(%s): empty vmID", ipConfigurationID) @@ -1118,7 +1118,7 @@ func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID stri } asID := "" if vm.VirtualMachineProperties != nil && vm.AvailabilitySet != nil { - asID = to.String(vm.AvailabilitySet.ID) + asID = pointer.StringDeref(vm.AvailabilitySet.ID, "") } if asID == "" { return vmName, "", nil diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_storageaccount.go 
b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_storageaccount.go index cc3a61094224..1365d9032a7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_storageaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_storageaccount.go @@ -24,9 +24,9 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/klog/v2" + "k8s.io/utils/pointer" ) // AccountOptions contains the fields which are used to create storage account. @@ -76,7 +76,7 @@ func (az *Cloud) getStorageAccounts(accountOptions *AccountOptions) ([]accountWi found := false for _, subnetID := range accountOptions.VirtualNetworkResourceIDs { for _, rule := range *acct.AccountProperties.NetworkRuleSet.VirtualNetworkRules { - if strings.EqualFold(to.String(rule.VirtualNetworkResourceID), subnetID) && rule.Action == storage.Allow { + if strings.EqualFold(pointer.StringDeref(rule.VirtualNetworkResourceID, ""), subnetID) && rule.Action == storage.Allow { found = true break } @@ -199,7 +199,7 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount defer cancel() rerr := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp) if rerr != nil { - return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %v", accountName, rerr)) + return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, rerr) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go index 391f144f9406..2a9c3c1aa915 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go @@ -25,9 +25,8 @@ import ( "strings" "sync" - "github.com/Azure/go-autorest/autorest/to" - "k8s.io/klog/v2" + "k8s.io/utils/pointer" ) const ( @@ -135,7 +134,7 @@ func parseTags(tags string) map[string]*string { klog.Warningf("parseTags: error when parsing key-value pair %s-%s, would ignore this one", k, v) continue } - formatted[strings.ToLower(k)] = to.StringPtr(v) + formatted[strings.ToLower(k)] = pointer.String(v) } return formatted } @@ -156,7 +155,7 @@ func reconcileTags(currentTagsOnResource, newTags map[string]*string) (reconcile if !found { currentTagsOnResource[k] = v changed = true - } else if !strings.EqualFold(to.String(v), to.String(currentTagsOnResource[key])) { + } else if !strings.EqualFold(pointer.StringDeref(v, ""), pointer.StringDeref(currentTagsOnResource[key], "")) { currentTagsOnResource[key] = v changed = true } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index cdc43a6ee9bd..c9059355f1d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -30,7 +30,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -40,6 +39,7 @@ import ( azcache "k8s.io/legacy-cloud-providers/azure/cache" 
"k8s.io/legacy-cloud-providers/azure/metrics" utilnet "k8s.io/utils/net" + "k8s.io/utils/pointer" ) var ( @@ -233,7 +233,7 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { statuses := *vm.InstanceView.Statuses for _, status := range statuses { - state := to.String(status.Code) + state := pointer.StringDeref(status.Code, "") if strings.HasPrefix(state, vmPowerStatePrefix) { return strings.TrimPrefix(state, vmPowerStatePrefix), nil } @@ -266,7 +266,7 @@ func (ss *scaleSet) GetProvisioningStateByNodeName(name string) (provisioningSta return provisioningState, nil } - return to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), nil + return pointer.StringDeref(vm.VirtualMachineScaleSetVMProperties.ProvisioningState, ""), nil } // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. @@ -455,7 +455,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err) } - failureDomain = ss.makeZone(to.String(vm.Location), zoneID) + failureDomain = ss.makeZone(pointer.StringDeref(vm.Location, ""), zoneID) } else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil { // Availability zone is not used for the node, falling back to fault domain. failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)) @@ -468,7 +468,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return cloudprovider.Zone{ FailureDomain: strings.ToLower(failureDomain), - Region: strings.ToLower(to.String(vm.Location)), + Region: strings.ToLower(pointer.StringDeref(vm.Location, "")), }, nil } @@ -565,7 +565,7 @@ func (ss *scaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) { // This returns the full identifier of the primary NIC for the given VM. func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) { if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil { - return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name)) + return "", fmt.Errorf("failed to find the network interfaces for vm %s", pointer.StringDeref(machine.Name, "")) } if len(*machine.NetworkProfile.NetworkInterfaces) == 1 { @@ -573,12 +573,12 @@ func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSet } for _, ref := range *machine.NetworkProfile.NetworkInterfaces { - if to.Bool(ref.Primary) { + if pointer.BoolDeref(ref.Primary, false) { return *ref.ID, nil } } - return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", to.String(machine.Name)) + return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", pointer.StringDeref(machine.Name, "")) } // getVmssMachineID returns the full identifier of a vmss virtual machine. @@ -642,7 +642,7 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { ssNames := make([]string, 0) for _, vmss := range allScaleSets { name := *vmss.Name - if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 { + if vmss.Sku != nil && pointer.Int64Deref(vmss.Sku.Capacity, 0) == 0 { klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name) continue } @@ -1069,7 +1069,7 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam // Compose a new vmssVM with added backendPoolID. 
newBackendPools = append(newBackendPools, compute.SubResource{ - ID: to.StringPtr(backendPoolID), + ID: pointer.String(backendPoolID), }) primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools newVM := &compute.VirtualMachineScaleSetVM{ @@ -1215,7 +1215,7 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back // Compose a new vmss with added backendPoolID. loadBalancerBackendAddressPools = append(loadBalancerBackendAddressPools, compute.SubResource{ - ID: to.StringPtr(backendPoolID), + ID: pointer.String(backendPoolID), }) primaryIPConfig.LoadBalancerBackendAddressPools = &loadBalancerBackendAddressPools newVMSS := compute.VirtualMachineScaleSet{ diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index 263c2d3ddcae..0bbf33306359 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -27,11 +27,11 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" + "k8s.io/utils/pointer" ) var ( @@ -207,13 +207,13 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac vmssVMCacheEntry := &vmssVirtualMachinesEntry{ resourceGroup: resourceGroupName, vmssName: vmssName, - instanceID: to.String(vm.InstanceID), + instanceID: pointer.StringDeref(vm.InstanceID, ""), virtualMachine: &vm, lastUpdate: time.Now().UTC(), } // set cache entry to nil when the VM is under deleting. 
if vm.VirtualMachineScaleSetVMProperties != nil && - strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + strings.EqualFold(pointer.StringDeref(vm.VirtualMachineScaleSetVMProperties.ProvisioningState, ""), string(compute.ProvisioningStateDeleting)) { klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) vmssVMCacheEntry.virtualMachine = nil } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go index 9ebdec2d0657..0f3e351f96f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go @@ -28,13 +28,13 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var ( @@ -197,7 +197,7 @@ func (az *Cloud) newVMCache() (*azcache.TimedCache, error) { } if vm.VirtualMachineProperties != nil && - strings.EqualFold(to.String(vm.VirtualMachineProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + strings.EqualFold(pointer.StringDeref(vm.VirtualMachineProperties.ProvisioningState, ""), string(compute.ProvisioningStateDeleting)) { klog.V(2).Infof("Virtual machine %q is under deleting", key) return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient/azure_containerserviceclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient/azure_containerserviceclient.go index d087200d8be8..4ccc36763889 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient/azure_containerserviceclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient/azure_containerserviceclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -197,7 +197,7 @@ func (c *Client) listManagedCluster(ctx context.Context, resourceGroupName strin result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -223,12 +223,12 @@ func (c *Client) listResponder(resp *http.Response) (result containerservice.Man // managedClusterListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. 
func (c *Client) managedClusterListResultPreparer(ctx context.Context, mclr containerservice.ManagedClusterListResult) (*http.Request, error) { - if mclr.NextLink == nil || len(to.String(mclr.NextLink)) < 1 { + if mclr.NextLink == nil || len(pointer.StringDeref(mclr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(mclr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(mclr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/deploymentclient/azure_deploymentclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/deploymentclient/azure_deploymentclient.go index 4ba1983e1646..b12f7d81d4d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/deploymentclient/azure_deploymentclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/deploymentclient/azure_deploymentclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -197,7 +197,7 @@ func (c *Client) listDeployment(ctx context.Context, resourceGroupName string) ( result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -223,12 +223,12 @@ func (c *Client) listResponder(resp *http.Response) (result resources.Deployment // deploymentListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) deploymentListResultPreparer(ctx context.Context, dplr resources.DeploymentListResult) (*http.Request, error) { - if dplr.NextLink == nil || len(to.String(dplr.NextLink)) < 1 { + if dplr.NextLink == nil || len(pointer.StringDeref(dplr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(dplr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(dplr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) 
} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go index 979920e3374c..4b64ef28465b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go @@ -29,7 +29,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -37,6 +36,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -351,7 +351,7 @@ func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName stri result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -401,13 +401,13 @@ func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, er } func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) { - if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + if lr.NextLink == nil || len(pointer.StringDeref(lr.NextLink, "")) < 1 { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(lr.NextLink))) + autorest.WithBaseURL(pointer.StringDeref(lr.NextLink, ""))) } // DiskListPage contains a page of Disk values. diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/azure_loadbalancerclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/azure_loadbalancerclient.go index b2ebe6bfc34d..49c6e51af1b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/azure_loadbalancerclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/azure_loadbalancerclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -197,7 +197,7 @@ func (c *Client) listLB(ctx context.Context, resourceGroupName string) ([]networ result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. 
- if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -341,12 +341,12 @@ func (c *Client) listResponder(resp *http.Response) (result network.LoadBalancer // loadBalancerListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) loadBalancerListResultPreparer(ctx context.Context, lblr network.LoadBalancerListResult) (*http.Request, error) { - if lblr.NextLink == nil || len(to.String(lblr.NextLink)) < 1 { + if lblr.NextLink == nil || len(pointer.StringDeref(lblr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(lblr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(lblr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/publicipclient/azure_publicipclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/publicipclient/azure_publicipclient.go index b9d7424579c3..c889d814e3bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/publicipclient/azure_publicipclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/publicipclient/azure_publicipclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -270,7 +270,7 @@ func (c *Client) listPublicIPAddress(ctx context.Context, resourceGroupName stri result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -407,12 +407,12 @@ func (c *Client) listResponder(resp *http.Response) (result network.PublicIPAddr // publicIPAddressListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) publicIPAddressListResultPreparer(ctx context.Context, lr network.PublicIPAddressListResult) (*http.Request, error) { - if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + if lr.NextLink == nil || len(pointer.StringDeref(lr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(lr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(lr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) 
} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/azure_securitygroupclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/azure_securitygroupclient.go index cd948d5d1556..10c9bedaaa5a 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/azure_securitygroupclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/azure_securitygroupclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -197,7 +197,7 @@ func (c *Client) listSecurityGroup(ctx context.Context, resourceGroupName string result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -341,12 +341,12 @@ func (c *Client) listResponder(resp *http.Response) (result network.SecurityGrou // securityGroupListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) securityGroupListResultPreparer(ctx context.Context, sglr network.SecurityGroupListResult) (*http.Request, error) { - if sglr.NextLink == nil || len(to.String(sglr.NextLink)) < 1 { + if sglr.NextLink == nil || len(pointer.StringDeref(sglr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(sglr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(sglr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/snapshotclient/azure_snapshotclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/snapshotclient/azure_snapshotclient.go index eab3d34d5f61..973f60fb21ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/snapshotclient/azure_snapshotclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/snapshotclient/azure_snapshotclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -308,7 +308,7 @@ func (c *Client) listSnapshotsByResourceGroup(ctx context.Context, resourceGroup result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. 
- if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -334,12 +334,12 @@ func (c *Client) listResponder(resp *http.Response) (result compute.SnapshotList // SnapshotListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) SnapshotListResultPreparer(ctx context.Context, lr compute.SnapshotList) (*http.Request, error) { - if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + if lr.NextLink == nil || len(pointer.StringDeref(lr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(lr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(lr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/azure_storageaccountclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/azure_storageaccountclient.go index 9699983a9587..67514881070a 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/azure_storageaccountclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/azure_storageaccountclient.go @@ -29,7 +29,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -37,6 +36,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -374,7 +374,7 @@ func (c *Client) ListStorageAccountByResourceGroup(ctx context.Context, resource result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -400,12 +400,12 @@ func (c *Client) listResponder(resp *http.Response) (result storage.AccountListR // StorageAccountResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) StorageAccountResultPreparer(ctx context.Context, lr storage.AccountListResult) (*http.Request, error) { - if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + if lr.NextLink == nil || len(pointer.StringDeref(lr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(lr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(lr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) 
} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/subnetclient/azure_subnetclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/subnetclient/azure_subnetclient.go index 16a973f31631..e9b181bcf310 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/subnetclient/azure_subnetclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/subnetclient/azure_subnetclient.go @@ -27,7 +27,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -35,6 +34,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -202,7 +202,7 @@ func (c *Client) listSubnet(ctx context.Context, resourceGroupName string, virtu result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -341,12 +341,12 @@ func (c *Client) listResponder(resp *http.Response) (result network.SubnetListRe // subnetListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) subnetListResultPreparer(ctx context.Context, lblr network.SubnetListResult) (*http.Request, error) { - if lblr.NextLink == nil || len(to.String(lblr.NextLink)) < 1 { + if lblr.NextLink == nil || len(pointer.StringDeref(lblr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(lblr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(lblr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmclient/azure_vmclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmclient/azure_vmclient.go index cd4c0fe3aff2..7feeb8ba08af 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmclient/azure_vmclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmclient/azure_vmclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -199,7 +199,7 @@ func (c *Client) listVM(ctx context.Context, resourceGroupName string) ([]comput result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. 
- if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -296,12 +296,12 @@ func (c *Client) listResponder(resp *http.Response) (result compute.VirtualMachi // vmListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) vmListResultPreparer(ctx context.Context, vmlr compute.VirtualMachineListResult) (*http.Request, error) { - if vmlr.NextLink == nil || len(to.String(vmlr.NextLink)) < 1 { + if vmlr.NextLink == nil || len(pointer.StringDeref(vmlr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(vmlr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(vmlr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go index 3f29a2578252..e83ad41b9a8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -36,6 +35,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -197,7 +197,7 @@ func (c *Client) listVMSS(ctx context.Context, resourceGroupName string) ([]comp result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -365,12 +365,12 @@ func (c *Client) listResponder(resp *http.Response) (result compute.VirtualMachi // virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) virtualMachineScaleSetListResultPreparer(ctx context.Context, vmsslr compute.VirtualMachineScaleSetListResult) (*http.Request, error) { - if vmsslr.NextLink == nil || len(to.String(vmsslr.NextLink)) < 1 { + if vmsslr.NextLink == nil || len(pointer.StringDeref(vmsslr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(vmsslr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(vmsslr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) 
} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient.go index 48844cffb76a..154b9f76ad11 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient.go @@ -28,7 +28,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/util/flowcontrol" @@ -37,6 +36,7 @@ import ( "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" + "k8s.io/utils/pointer" ) var _ Interface = &Client{} @@ -203,7 +203,7 @@ func (c *Client) listVMSSVM(ctx context.Context, resourceGroupName string, virtu result = append(result, page.Values()...) // Abort the loop when there's no nextLink in the response. - if to.String(page.Response().NextLink) == "" { + if pointer.StringDeref(page.Response().NextLink, "") == "" { break } @@ -299,12 +299,12 @@ func (c *Client) listResponder(resp *http.Response) (result compute.VirtualMachi // virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (c *Client) virtualMachineScaleSetVMListResultPreparer(ctx context.Context, vmssvmlr compute.VirtualMachineScaleSetVMListResult) (*http.Request, error) { - if vmssvmlr.NextLink == nil || len(to.String(vmssvmlr.NextLink)) < 1 { + if vmssvmlr.NextLink == nil || len(pointer.StringDeref(vmssvmlr.NextLink, "")) < 1 { return nil, nil } decorators := []autorest.PrepareDecorator{ - autorest.WithBaseURL(to.String(vmssvmlr.NextLink)), + autorest.WithBaseURL(pointer.StringDeref(vmssvmlr.NextLink, "")), } return c.armClient.PrepareGetRequest(ctx, decorators...) 
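The four Azure client diffs above all make the same mechanical substitution: the go-autorest helper to.String(p) is replaced by pointer.StringDeref(p, "") from k8s.io/utils/pointer. Both return the empty string for a nil pointer, so the "stop paging when the response has no nextLink" logic is unchanged. A minimal standalone sketch of the equivalence (the URL is a placeholder, not a real NextLink value):

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	var nextLink *string // nil: the response carried no nextLink
	if pointer.StringDeref(nextLink, "") == "" {
		fmt.Println("no nextLink, stop paging")
	}

	nextLink = pointer.String("https://management.example.invalid/page2") // placeholder URL
	fmt.Println("next page:", pointer.StringDeref(nextLink, ""))
}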
} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go index da468b3c86bb..24ec9819f081 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go @@ -94,7 +94,7 @@ func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualCont for _, device := range devices { d := device.GetVirtualDevice() if d.ControllerKey == key { - if d.UnitNumber != nil { + if d.UnitNumber != nil && *d.UnitNumber < SCSIDeviceSlots { takenUnitNumbers[*d.UnitNumber] = true } } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/fake_mounter.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/fake_mounter.go index 5a5701598837..084428445cef 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/fake_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/fake_mounter.go @@ -218,7 +218,7 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } -func (f *FakeMounter) canSafelySkipMountPointCheck() bool { +func (f *FakeMounter) CanSafelySkipMountPointCheck() bool { return f.skipMountPointCheck } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount.go index 57abb69196ec..d7845ac495c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount.go @@ -65,10 +65,10 @@ type Interface interface { // care about such situations, this is a faster alternative to calling List() // and scanning that output. IsLikelyNotMountPoint(file string) (bool, error) - // canSafelySkipMountPointCheck indicates whether this mounter returns errors on + // CanSafelySkipMountPointCheck indicates whether this mounter returns errors on // operations for targets that are not mount points. If this returns true, no such // errors will be returned. - canSafelySkipMountPointCheck() bool + CanSafelySkipMountPointCheck() bool // IsMountPoint determines if a directory is a mountpoint. // It should return ErrNotExist when the directory does not exist. // IsMountPoint is more expensive than IsLikelyNotMountPoint. diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_helper_common.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_helper_common.go index dc4131d78e36..440cf64dd079 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_helper_common.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_helper_common.go @@ -52,9 +52,9 @@ func CleanupMountWithForce(mountPath string, mounter MounterForceUnmounter, exte return fmt.Errorf("Error checking path: %v", pathErr) } - if corruptedMnt || mounter.canSafelySkipMountPointCheck() { + if corruptedMnt || mounter.CanSafelySkipMountPointCheck() { klog.V(4).Infof("unmounting %q (corruptedMount: %t, mounterCanSkipMountPointChecks: %t)", - mountPath, corruptedMnt, mounter.canSafelySkipMountPointCheck()) + mountPath, corruptedMnt, mounter.CanSafelySkipMountPointCheck()) if err := mounter.UnmountWithForce(mountPath, umountTimeout); err != nil { return err } @@ -89,9 +89,9 @@ func CleanupMountWithForce(mountPath string, mounter MounterForceUnmounter, exte // if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check // will be skipped. The mount point check will also be skipped if the mounter supports it. 
func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { - if corruptedMnt || mounter.canSafelySkipMountPointCheck() { + if corruptedMnt || mounter.CanSafelySkipMountPointCheck() { klog.V(4).Infof("unmounting %q (corruptedMount: %t, mounterCanSkipMountPointChecks: %t)", - mountPath, corruptedMnt, mounter.canSafelySkipMountPointCheck()) + mountPath, corruptedMnt, mounter.CanSafelySkipMountPointCheck()) if err := mounter.Unmount(mountPath); err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go index 1752d11f6400..a7c00ea1b6dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go @@ -23,7 +23,6 @@ import ( "context" "errors" "fmt" - "github.com/moby/sys/mountinfo" "io/fs" "io/ioutil" "os" @@ -34,6 +33,8 @@ import ( "syscall" "time" + "github.com/moby/sys/mountinfo" + "k8s.io/klog/v2" utilexec "k8s.io/utils/exec" utilio "k8s.io/utils/io" @@ -382,7 +383,7 @@ func (mounter *Mounter) Unmount(target string) error { // UnmountWithForce unmounts given target but will retry unmounting with force option // after given timeout. func (mounter *Mounter) UnmountWithForce(target string, umountTimeout time.Duration) error { - err := tryUnmount(target, umountTimeout) + err := tryUnmount(mounter, target, umountTimeout) if err != nil { if err == context.DeadlineExceeded { klog.V(2).Infof("Timed out waiting for unmount of %s, trying with -f", target) @@ -422,8 +423,8 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } -// canSafelySkipMountPointCheck relies on the detected behavior of umount when given a target that is not a mount point. -func (mounter *Mounter) canSafelySkipMountPointCheck() bool { +// CanSafelySkipMountPointCheck relies on the detected behavior of umount when given a target that is not a mount point. +func (mounter *Mounter) CanSafelySkipMountPointCheck() bool { return mounter.withSafeNotMountedBehavior } @@ -774,7 +775,7 @@ func (mounter *Mounter) IsMountPoint(file string) (bool, error) { } // tryUnmount calls plain "umount" and waits for unmountTimeout for it to finish. 
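The mount-utils diffs above are an export rename (canSafelySkipMountPointCheck becomes CanSafelySkipMountPointCheck) so that cleanup helpers outside the package can take the same shortcut. A small sketch of the branch doCleanupMountPoint makes, using illustrative stand-in types rather than the real Interface and Mounter:

package main

import "fmt"

// mounter stands in for the part of the mount-utils Interface that the
// cleanup helpers rely on after the rename.
type mounter interface {
	CanSafelySkipMountPointCheck() bool
}

// linuxLikeMounter mimics the Linux Mounter, which reports true only when the
// detected umount behavior tolerates targets that are not mount points.
type linuxLikeMounter struct{ withSafeNotMountedBehavior bool }

func (m linuxLikeMounter) CanSafelySkipMountPointCheck() bool {
	return m.withSafeNotMountedBehavior
}

func cleanup(m mounter, corruptedMnt bool) {
	if corruptedMnt || m.CanSafelySkipMountPointCheck() {
		fmt.Println("unmount without first checking whether the path is a mount point")
		return
	}
	fmt.Println("verify the path is a mount point before unmounting")
}

func main() {
	cleanup(linuxLikeMounter{withSafeNotMountedBehavior: true}, false)
	cleanup(linuxLikeMounter{}, false)
}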
-func tryUnmount(path string, unmountTimeout time.Duration) error { +func tryUnmount(mounter *Mounter, path string, unmountTimeout time.Duration) error { klog.V(4).Infof("Unmounting %s", path) ctx, cancel := context.WithTimeout(context.Background(), unmountTimeout) defer cancel() @@ -789,6 +790,10 @@ func tryUnmount(path string, unmountTimeout time.Duration) error { } if cmderr != nil { + if mounter.withSafeNotMountedBehavior && strings.Contains(string(out), errNotMounted) { + klog.V(4).Infof("ignoring 'not mounted' error for %s", path) + return nil + } return fmt.Errorf("unmount failed: %v\nUnmounting arguments: %s\nOutput: %s", cmderr, path, string(out)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_unsupported.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_unsupported.go index 8e03d643b703..0345904a0102 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_unsupported.go @@ -74,8 +74,8 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, errUnsupported } -// canSafelySkipMountPointCheck always returns false on unsupported platforms -func (mounter *Mounter) canSafelySkipMountPointCheck() bool { +// CanSafelySkipMountPointCheck always returns false on unsupported platforms +func (mounter *Mounter) CanSafelySkipMountPointCheck() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go index 7c7b396e5faf..7b2a2d1a15e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go @@ -244,8 +244,8 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } -// canSafelySkipMountPointCheck always returns false on Windows -func (mounter *Mounter) canSafelySkipMountPointCheck() bool { +// CanSafelySkipMountPointCheck always returns false on Windows +func (mounter *Mounter) CanSafelySkipMountPointCheck() bool { return false } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/resizefs_linux.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/resizefs_linux.go index 929d061e52b8..81386fef878c 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/resizefs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/resizefs_linux.go @@ -28,6 +28,10 @@ import ( utilexec "k8s.io/utils/exec" ) +const ( + blockDev = "blockdev" +) + // ResizeFs Provides support for resizing file systems type ResizeFs struct { exec utilexec.Interface @@ -104,6 +108,17 @@ func (resizefs *ResizeFs) btrfsResize(deviceMountPath string) (bool, error) { } func (resizefs *ResizeFs) NeedResize(devicePath string, deviceMountPath string) (bool, error) { + // Do nothing if device is mounted as readonly + readonly, err := resizefs.getDeviceRO(devicePath) + if err != nil { + return false, err + } + + if readonly { + klog.V(3).Infof("ResizeFs.needResize - no resize possible since filesystem %s is readonly", devicePath) + return false, nil + } + deviceSize, err := resizefs.getDeviceSize(devicePath) if err != nil { return false, err @@ -147,7 +162,7 @@ func (resizefs *ResizeFs) NeedResize(devicePath string, deviceMountPath string) return true, nil } func (resizefs *ResizeFs) getDeviceSize(devicePath string) (uint64, error) { - output, err := resizefs.exec.Command("blockdev", "--getsize64", devicePath).CombinedOutput() + output, err := 
resizefs.exec.Command(blockDev, "--getsize64", devicePath).CombinedOutput() outStr := strings.TrimSpace(string(output)) if err != nil { return 0, fmt.Errorf("failed to read size of device %s: %s: %s", devicePath, err, outStr) @@ -159,6 +174,22 @@ func (resizefs *ResizeFs) getDeviceSize(devicePath string) (uint64, error) { return size, nil } +func (resizefs *ResizeFs) getDeviceRO(devicePath string) (bool, error) { + output, err := resizefs.exec.Command(blockDev, "--getro", devicePath).CombinedOutput() + outStr := strings.TrimSpace(string(output)) + if err != nil { + return false, fmt.Errorf("failed to get readonly bit from device %s: %s: %s", devicePath, err, outStr) + } + switch outStr { + case "0": + return false, nil + case "1": + return true, nil + default: + return false, fmt.Errorf("Failed readonly device check. Expected 1 or 0, got '%s'", outStr) + } +} + func (resizefs *ResizeFs) getExtSize(devicePath string) (uint64, uint64, error) { output, err := resizefs.exec.Command("dumpe2fs", "-h", devicePath).CombinedOutput() if err != nil { diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 0a4a681206d9..52f904d35231 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -69,7 +69,7 @@ github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.8.22 +# github.com/Microsoft/hcsshim v0.8.25 ## explicit; go 1.13 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -104,7 +104,7 @@ github.com/antlr/antlr4/runtime/Go/antlr # github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e ## explicit github.com/armon/circbuf -# github.com/aws/aws-sdk-go v1.44.116 +# github.com/aws/aws-sdk-go v1.44.147 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -242,15 +242,15 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/jsonpointer v0.19.5 +# github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.0 +# github.com/go-openapi/jsonreference v0.20.1 ## explicit; go 1.13 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.19.14 -## explicit; go 1.11 +# github.com/go-openapi/swag v0.22.3 +## explicit; go 1.18 github.com/go-openapi/swag # github.com/godbus/dbus/v5 v5.0.6 ## explicit; go 1.12 @@ -285,9 +285,8 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/cadvisor v0.46.0 +# github.com/google/cadvisor v0.47.1 ## explicit; go 1.16 -github.com/google/cadvisor/accelerators github.com/google/cadvisor/cache/memory github.com/google/cadvisor/collector github.com/google/cadvisor/container @@ -336,7 +335,7 @@ github.com/google/cadvisor/utils/sysfs github.com/google/cadvisor/utils/sysinfo github.com/google/cadvisor/version 
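The resizefs change above teaches NeedResize to bail out early for read-only devices by shelling out to blockdev --getro, which prints 1 for a read-only block device and 0 otherwise. A standalone sketch of that check using os/exec directly instead of the utilexec interface (the device path is a placeholder):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// isDeviceReadOnly mirrors the getDeviceRO helper added above: blockdev
// --getro prints "1" for a read-only block device and "0" otherwise.
func isDeviceReadOnly(devicePath string) (bool, error) {
	out, err := exec.Command("blockdev", "--getro", devicePath).CombinedOutput()
	outStr := strings.TrimSpace(string(out))
	if err != nil {
		return false, fmt.Errorf("blockdev --getro %s failed: %v: %s", devicePath, err, outStr)
	}
	switch outStr {
	case "0":
		return false, nil
	case "1":
		return true, nil
	default:
		return false, fmt.Errorf("unexpected blockdev output %q", outStr)
	}
}

func main() {
	ro, err := isDeviceReadOnly("/dev/sda1") // placeholder device path
	fmt.Println("read-only:", ro, "err:", err)
}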
github.com/google/cadvisor/watcher -# github.com/google/cel-go v0.12.5 +# github.com/google/cel-go v0.12.6 ## explicit; go 1.17 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -377,7 +376,7 @@ github.com/google/go-querystring/query # github.com/google/gofuzz v1.1.0 ## explicit; go 1.12 github.com/google/gofuzz -# github.com/google/uuid v1.1.2 +# github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.1.1 @@ -423,7 +422,7 @@ github.com/libopenstorage/openstorage/volume # github.com/lithammer/dedent v1.1.0 ## explicit github.com/lithammer/dedent -# github.com/mailru/easyjson v0.7.6 +# github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer @@ -431,17 +430,14 @@ github.com/mailru/easyjson/jwriter # github.com/matttproud/golang_protobuf_extensions v1.0.2 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 -## explicit -github.com/mindprince/gonvml # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible ## explicit github.com/mistifyio/go-zfs # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/moby/ipvs v1.0.1 -## explicit; go 1.13 +# github.com/moby/ipvs v1.1.0 +## explicit; go 1.17 github.com/moby/ipvs # github.com/moby/spdystream v0.2.0 ## explicit; go 1.13 @@ -498,7 +494,7 @@ github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/userns github.com/opencontainers/runc/libcontainer/utils github.com/opencontainers/runc/types -# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 +# github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/selinux v1.10.0 @@ -543,7 +539,7 @@ github.com/satori/go.uuid # github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/sirupsen/logrus v1.8.1 +# github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/spf13/cobra v1.6.0 @@ -555,10 +551,10 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/objx v0.4.0 +# github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.8.0 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock @@ -571,12 +567,15 @@ github.com/syndtr/gocapability/capability ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae +# github.com/vishvananda/netns v0.0.2 ## explicit; go 1.12 github.com/vishvananda/netns -# github.com/vmware/govmomi v0.20.3 -## explicit 
+# github.com/vmware/govmomi v0.30.0 +## explicit; go 1.17 github.com/vmware/govmomi/find +github.com/vmware/govmomi/history +github.com/vmware/govmomi/internal +github.com/vmware/govmomi/internal/version github.com/vmware/govmomi/list github.com/vmware/govmomi/lookup github.com/vmware/govmomi/lookup/methods @@ -588,12 +587,14 @@ github.com/vmware/govmomi/pbm/methods github.com/vmware/govmomi/pbm/types github.com/vmware/govmomi/property github.com/vmware/govmomi/session +github.com/vmware/govmomi/session/keepalive github.com/vmware/govmomi/sts github.com/vmware/govmomi/sts/internal github.com/vmware/govmomi/task github.com/vmware/govmomi/vapi/internal github.com/vmware/govmomi/vapi/rest github.com/vmware/govmomi/vapi/tags +github.com/vmware/govmomi/view github.com/vmware/govmomi/vim25 github.com/vmware/govmomi/vim25/debug github.com/vmware/govmomi/vim25/methods @@ -729,7 +730,7 @@ golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa -# golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 +# golang.org/x/net v0.4.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -755,7 +756,7 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/singleflight # golang.org/x/sys v0.3.0 @@ -826,7 +827,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.49.0 +# google.golang.org/grpc v1.51.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes @@ -939,7 +940,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.26.0 => k8s.io/api v0.26.0 +# k8s.io/api v0.27.0-alpha.1 => k8s.io/api v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -994,7 +995,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.26.0 => k8s.io/apimachinery v0.26.1-rc.0 +# k8s.io/apimachinery v0.27.0-alpha.1 => k8s.io/apimachinery v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1054,7 +1055,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.26.0 => k8s.io/apiserver v0.26.0 +# k8s.io/apiserver v0.27.0-alpha.1 => k8s.io/apiserver v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1193,7 +1194,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.26.0 => k8s.io/client-go v0.26.0 +# k8s.io/client-go v0.27.0-alpha.1 => k8s.io/client-go v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/client-go/applyconfigurations/admissionregistration/v1 
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1487,6 +1488,7 @@ k8s.io/client-go/scale/scheme/extensionsv1beta1 k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -1513,7 +1515,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.26.0 => k8s.io/cloud-provider v0.26.0 +# k8s.io/cloud-provider v0.27.0-alpha.1 => k8s.io/cloud-provider v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1523,7 +1525,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.26.0 => k8s.io/component-base v0.26.0 +# k8s.io/component-base v0.27.0-alpha.1 => k8s.io/component-base v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/component-base/cli/flag k8s.io/component-base/codec @@ -1535,6 +1537,7 @@ k8s.io/component-base/configz k8s.io/component-base/featuregate k8s.io/component-base/logs k8s.io/component-base/logs/api/v1 +k8s.io/component-base/logs/internal/setverbositylevel k8s.io/component-base/logs/klogflags k8s.io/component-base/logs/logreduction k8s.io/component-base/metrics @@ -1550,7 +1553,7 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.26.0 => k8s.io/component-helpers v0.26.0 +# k8s.io/component-helpers v0.27.0-alpha.1 => k8s.io/component-helpers v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/node/topology @@ -1560,16 +1563,16 @@ k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.26.1-rc.0 +# k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/errors -# k8s.io/csi-translation-lib v0.26.0 => k8s.io/csi-translation-lib v0.26.0 +# k8s.io/csi-translation-lib v0.27.0-alpha.1 => k8s.io/csi-translation-lib v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins -# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.26.0 +# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/dynamic-resource-allocation/resourceclaim # k8s.io/klog/v2 v2.80.1 @@ -1580,11 +1583,12 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kms v0.26.0 => k8s.io/kms v0.26.1-rc.0 +# k8s.io/kms v0.27.0-alpha.1 => k8s.io/kms v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2alpha1 -# k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 +k8s.io/kms/service +# k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596 ## explicit; go 1.18 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 @@ -1603,19 +1607,19 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kube-proxy v0.0.0 => 
k8s.io/kube-proxy v0.26.0 +# k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.26.0 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/config/v1beta2 k8s.io/kube-scheduler/config/v1beta3 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.26.0 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.26.0 => k8s.io/kubelet v0.26.0 +# k8s.io/kubelet v0.27.0-alpha.1 => k8s.io/kubelet v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kubelet/config/v1 k8s.io/kubelet/config/v1alpha1 @@ -1632,7 +1636,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.26.0 +# k8s.io/kubernetes v1.27.0-alpha.1 ## explicit; go 1.19 k8s.io/kubernetes/cmd/kube-proxy/app k8s.io/kubernetes/cmd/kubelet/app @@ -1902,7 +1906,7 @@ k8s.io/kubernetes/pkg/volume/vsphere_volume k8s.io/kubernetes/pkg/windows/service k8s.io/kubernetes/test/utils k8s.io/kubernetes/third_party/forked/golang/expansion -# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.26.0 +# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure @@ -1945,7 +1949,7 @@ k8s.io/legacy-cloud-providers/gce/gcpcredential k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.26.1-rc.0 +# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.27.0-alpha.1 ## explicit; go 1.19 k8s.io/mount-utils # k8s.io/utils v0.0.0-20221107191617-1a15be271d1d @@ -1968,9 +1972,11 @@ k8s.io/utils/pointer k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client +sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics +sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client # sigs.k8s.io/cloud-provider-azure v1.24.2 ## explicit; go 1.18 @@ -2041,32 +2047,33 @@ sigs.k8s.io/yaml # github.com/aws/aws-sdk-go/service/eks => github.com/aws/aws-sdk-go/service/eks v1.38.49 # github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 # github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -# k8s.io/api => k8s.io/api v0.26.0 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.0 -# k8s.io/apimachinery => k8s.io/apimachinery v0.26.1-rc.0 -# k8s.io/apiserver => k8s.io/apiserver v0.26.0 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.0 -# k8s.io/client-go => k8s.io/client-go v0.26.0 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.0 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.0 -# k8s.io/code-generator => k8s.io/code-generator v0.26.1-rc.0 -# k8s.io/component-base => k8s.io/component-base v0.26.0 -# 
k8s.io/component-helpers => k8s.io/component-helpers v0.26.0 -# k8s.io/controller-manager => k8s.io/controller-manager v0.26.0 -# k8s.io/cri-api => k8s.io/cri-api v0.26.1-rc.0 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.0 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.0 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.0 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.0 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.0 -# k8s.io/kubectl => k8s.io/kubectl v0.26.0 -# k8s.io/kubelet => k8s.io/kubelet v0.26.0 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.0 -# k8s.io/metrics => k8s.io/metrics v0.26.0 -# k8s.io/mount-utils => k8s.io/mount-utils v0.26.1-rc.0 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.0 -# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.0 -# k8s.io/sample-controller => k8s.io/sample-controller v0.26.0 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.0 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.0 -# k8s.io/kms => k8s.io/kms v0.26.1-rc.0 +# k8s.io/api => k8s.io/api v0.27.0-alpha.1 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0-alpha.1 +# k8s.io/apimachinery => k8s.io/apimachinery v0.27.0-alpha.1 +# k8s.io/apiserver => k8s.io/apiserver v0.27.0-alpha.1 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0-alpha.1 +# k8s.io/client-go => k8s.io/client-go v0.27.0-alpha.1 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0-alpha.1 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0-alpha.1 +# k8s.io/code-generator => k8s.io/code-generator v0.27.0-alpha.1 +# k8s.io/component-base => k8s.io/component-base v0.27.0-alpha.1 +# k8s.io/component-helpers => k8s.io/component-helpers v0.27.0-alpha.1 +# k8s.io/controller-manager => k8s.io/controller-manager v0.27.0-alpha.1 +# k8s.io/cri-api => k8s.io/cri-api v0.27.0-alpha.1 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0-alpha.1 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0-alpha.1 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0-alpha.1 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0-alpha.1 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0-alpha.1 +# k8s.io/kubectl => k8s.io/kubectl v0.27.0-alpha.1 +# k8s.io/kubelet => k8s.io/kubelet v0.27.0-alpha.1 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0-alpha.1 +# k8s.io/metrics => k8s.io/metrics v0.27.0-alpha.1 +# k8s.io/mount-utils => k8s.io/mount-utils v0.27.0-alpha.1 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0-alpha.1 +# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0-alpha.1 +# k8s.io/sample-controller => k8s.io/sample-controller v0.27.0-alpha.1 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0-alpha.1 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0-alpha.1 +# k8s.io/kms => k8s.io/kms v0.27.0-alpha.1 +# k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0-alpha.1 diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index c55070ae6378..68a3ebf12c56 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ 
b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -29,6 +29,9 @@ import ( "google.golang.org/grpc" "k8s.io/klog/v2" + + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics" + commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics" "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" ) @@ -115,6 +118,8 @@ func (cm *connectionManager) closeAll() { // grpcTunnel implements Tunnel type grpcTunnel struct { stream client.ProxyService_ProxyClient + sendLock sync.Mutex + recvLock sync.Mutex clientConn clientConn pendingDial pendingDialManager conns connectionManager @@ -131,6 +136,9 @@ type grpcTunnel struct { // closing should only be accessed through atomic methods. // TODO: switch this to an atomic.Bool once the client is exclusively buit with go1.19+ closing uint32 + + // Stores the current metrics.ClientConnectionStatus + prevStatus atomic.Value } type clientConn interface { @@ -139,6 +147,11 @@ type clientConn interface { var _ clientConn = &grpc.ClientConn{} +var ( + // Expose metrics for client to register. + Metrics = metrics.Metrics +) + // CreateSingleUseGrpcTunnel creates a Tunnel to dial to a remote server through a // gRPC based proxy service. // Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated @@ -177,7 +190,7 @@ func CreateSingleUseGrpcTunnelWithContext(createCtx, tunnelCtx context.Context, } func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *grpcTunnel { - return &grpcTunnel{ + t := grpcTunnel{ stream: stream, clientConn: c, pendingDial: pendingDialManager{pendingDials: make(map[int64]pendingDial)}, @@ -185,6 +198,36 @@ func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *g readTimeoutSeconds: 10, done: make(chan struct{}), } + s := metrics.ClientConnectionStatusCreated + t.prevStatus.Store(s) + metrics.Metrics.GetClientConnectionsMetric().WithLabelValues(string(s)).Inc() + return &t +} + +func (t *grpcTunnel) updateMetric(status metrics.ClientConnectionStatus) { + select { + case <-t.Done(): + return + default: + } + + prevStatus := t.prevStatus.Swap(status).(metrics.ClientConnectionStatus) + + m := metrics.Metrics.GetClientConnectionsMetric() + m.WithLabelValues(string(prevStatus)).Dec() + m.WithLabelValues(string(status)).Inc() +} + +// closeMetric should be called exactly once to finalize client_connections metric. +func (t *grpcTunnel) closeMetric() { + select { + case <-t.Done(): + return + default: + } + prevStatus := t.prevStatus.Load().(metrics.ClientConnectionStatus) + + metrics.Metrics.GetClientConnectionsMetric().WithLabelValues(string(prevStatus)).Dec() } func (t *grpcTunnel) serve(tunnelCtx context.Context) { @@ -196,19 +239,26 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { // close any channels remaining for these connections. t.conns.closeAll() + t.closeMetric() + close(t.done) }() for { - pkt, err := t.stream.Recv() - if err == io.EOF || t.isClosing() { + pkt, err := t.Recv() + if err == io.EOF { return } + isClosing := t.isClosing() if err != nil || pkt == nil { - klog.ErrorS(err, "stream read failure") + if !isClosing { + klog.ErrorS(err, "stream read failure") + } + return + } + if isClosing { return } - klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type) switch pkt.Type { @@ -222,13 +272,19 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { // 2. 
grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context // // In either scenario, we should return here and close the tunnel as it is no longer needed. - klog.V(1).InfoS("DialResp not recognized; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random) + kvs := []interface{}{"dialID", resp.Random, "connectID", resp.ConnectID} + if resp.Error != "" { + kvs = append(kvs, "error", resp.Error) + } + klog.V(1).InfoS("DialResp not recognized; dropped", kvs...) return } result := dialResult{connid: resp.ConnectID} if resp.Error != "" { - result.err = &dialFailure{resp.Error, DialFailureEndpoint} + result.err = &dialFailure{resp.Error, metrics.DialFailureEndpoint} + } else { + t.updateMetric(metrics.ClientConnectionStatusOk) } select { // try to send to the result channel @@ -263,7 +319,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { klog.V(1).InfoS("DIAL_CLS after dial finished", "dialID", resp.Random) } else { result := dialResult{ - err: &dialFailure{"dial closed", DialFailureDialClosed}, + err: &dialFailure{"dial closed", metrics.DialFailureDialClosed}, } select { case pendingDial.resultCh <- result: @@ -278,11 +334,23 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { case client.PacketType_DATA: resp := pkt.GetData() + if resp.ConnectID == 0 { + klog.ErrorS(nil, "Received packet missing ConnectID", "packetType", "DATA") + continue + } // TODO: flow control conn, ok := t.conns.get(resp.ConnectID) if !ok { - klog.V(1).InfoS("Connection not recognized", "connectionID", resp.ConnectID) + klog.ErrorS(nil, "Connection not recognized", "connectionID", resp.ConnectID, "packetType", "DATA") + t.Send(&client.Packet{ + Type: client.PacketType_CLOSE_REQ, + Payload: &client.Packet_CloseRequest{ + CloseRequest: &client.CloseRequest{ + ConnectID: resp.ConnectID, + }, + }, + }) continue } timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second) @@ -301,7 +369,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { conn, ok := t.conns.get(resp.ConnectID) if !ok { - klog.V(1).InfoS("Connection not recognized", "connectionID", resp.ConnectID) + klog.V(1).InfoS("Connection not recognized", "connectionID", resp.ConnectID, "packetType", "CLOSE_RSP") continue } close(conn.readCh) @@ -316,6 +384,15 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { // Dial connects to the address on the named network, similar to // what net.Dial does. The only supported protocol is tcp. 
func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address string) (net.Conn, error) { + conn, err := t.dialContext(requestCtx, protocol, address) + if err != nil { + _, reason := GetDialFailureReason(err) + metrics.Metrics.ObserveDialFailure(reason) + } + return conn, err +} + +func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address string) (net.Conn, error) { select { case <-t.done: return nil, errors.New("tunnel is closed") @@ -326,6 +403,8 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s return nil, errors.New("protocol not supported") } + t.updateMetric(metrics.ClientConnectionStatusDialing) + random := rand.Int63() /* #nosec G404 */ // This channel is closed once we're returning and no longer waiting on resultCh @@ -350,7 +429,7 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s } klog.V(5).InfoS("[tracing] send packet", "type", req.Type) - err := t.stream.Send(req) + err := t.Send(req) if err != nil { return nil, err } @@ -358,7 +437,7 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s klog.V(5).Infoln("DIAL_REQ sent to proxy server") c := &conn{ - stream: t.stream, + tunnel: t, random: random, closeTunnel: t.closeTunnel, } @@ -375,14 +454,14 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s case <-time.After(30 * time.Second): klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random) go t.closeDial(random) - return nil, &dialFailure{"dial timeout, backstop", DialFailureTimeout} + return nil, &dialFailure{"dial timeout, backstop", metrics.DialFailureTimeout} case <-requestCtx.Done(): klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", requestCtx.Err(), "dialID", random) go t.closeDial(random) - return nil, &dialFailure{"dial timeout, context", DialFailureContext} + return nil, &dialFailure{"dial timeout, context", metrics.DialFailureContext} case <-t.done: klog.V(5).InfoS("Tunnel closed while waiting for DialResp", "dialID", random) - return nil, &dialFailure{"tunnel closed", DialFailureTunnelClosed} + return nil, &dialFailure{"tunnel closed", metrics.DialFailureTunnelClosed} } return c, nil @@ -402,7 +481,7 @@ func (t *grpcTunnel) closeDial(dialID int64) { }, }, } - if err := t.stream.Send(req); err != nil { + if err := t.Send(req); err != nil { klog.V(5).InfoS("Failed to send DIAL_CLS", "err", err, "dialID", dialID) } t.closeTunnel() @@ -417,38 +496,48 @@ func (t *grpcTunnel) isClosing() bool { return atomic.LoadUint32(&t.closing) != 0 } -func GetDialFailureReason(err error) (isDialFailure bool, reason DialFailureReason) { +func (t *grpcTunnel) Send(pkt *client.Packet) error { + t.sendLock.Lock() + defer t.sendLock.Unlock() + + const segment = commonmetrics.SegmentFromClient + metrics.Metrics.ObservePacket(segment, pkt.Type) + err := t.stream.Send(pkt) + if err != nil && err != io.EOF { + metrics.Metrics.ObserveStreamError(segment, err, pkt.Type) + } + return err +} + +func (t *grpcTunnel) Recv() (*client.Packet, error) { + t.recvLock.Lock() + defer t.recvLock.Unlock() + + const segment = commonmetrics.SegmentToClient + pkt, err := t.stream.Recv() + if err != nil && err != io.EOF { + metrics.Metrics.ObserveStreamErrorNoPacket(segment, err) + } + if err != nil { + return pkt, err + } + metrics.Metrics.ObservePacket(segment, pkt.Type) + return pkt, nil +} + +func GetDialFailureReason(err error) (isDialFailure bool, reason metrics.DialFailureReason) { var df *dialFailure if 
errors.As(err, &df) { return true, df.reason } - return false, DialFailureUnknown + return false, metrics.DialFailureUnknown } type dialFailure struct { msg string - reason DialFailureReason + reason metrics.DialFailureReason } func (df *dialFailure) Error() string { return df.msg } - -type DialFailureReason string - -const ( - DialFailureUnknown DialFailureReason = "unknown" - // DialFailureTimeout indicates the hard 30 second timeout was hit. - DialFailureTimeout DialFailureReason = "timeout" - // DialFailureContext indicates that the context was cancelled or reached it's deadline before - // the dial response was returned. - DialFailureContext DialFailureReason = "context" - // DialFailureEndpoint indicates that the konnectivity-agent was unable to reach the backend endpoint. - DialFailureEndpoint DialFailureReason = "endpoint" - // DialFailureDialClosed indicates that the client received a CloseDial response, indicating the - // connection was closed before the dial could complete. - DialFailureDialClosed DialFailureReason = "dialclosed" - // DialFailureTunnelClosed indicates that the client connection was closed before the dial could - // complete. - DialFailureTunnelClosed DialFailureReason = "tunnelclosed" -) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go index f76b1e37a474..f4d3f788652d 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go @@ -23,6 +23,7 @@ import ( "time" "k8s.io/klog/v2" + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" ) @@ -35,7 +36,7 @@ var errConnCloseTimeout = errors.New("close timeout") // conn is an implementation of net.Conn, where the data is transported // over an established tunnel defined by a gRPC service ProxyService. type conn struct { - stream client.ProxyService_ProxyClient + tunnel *grpcTunnel connID int64 random int64 readCh chan []byte @@ -62,7 +63,7 @@ func (c *conn) Write(data []byte) (n int, err error) { klog.V(5).InfoS("[tracing] send req", "type", req.Type) - err = c.stream.Send(req) + err = c.tunnel.Send(req) if err != nil { return 0, err } @@ -147,7 +148,7 @@ func (c *conn) Close() error { klog.V(5).InfoS("[tracing] send req", "type", req.Type) - if err := c.stream.Send(req); err != nil { + if err := c.tunnel.Send(req); err != nil { return err } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics/metrics.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics/metrics.go new file mode 100644 index 000000000000..03e9d94da896 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics/metrics.go @@ -0,0 +1,162 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics" + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" +) + +const ( + Namespace = "konnectivity_network_proxy" + Subsystem = "client" +) + +var ( + // Metrics provides access to all client metrics. The client + // application is responsible for registering (via Metrics.RegisterMetrics). + Metrics = newMetrics() +) + +// ClientMetrics includes all the metrics of the konnectivity-client. +type ClientMetrics struct { + registerOnce sync.Once + streamPackets *prometheus.CounterVec + streamErrors *prometheus.CounterVec + dialFailures *prometheus.CounterVec + clientConns *prometheus.GaugeVec +} + +type DialFailureReason string + +const ( + DialFailureUnknown DialFailureReason = "unknown" + // DialFailureTimeout indicates the hard 30 second timeout was hit. + DialFailureTimeout DialFailureReason = "timeout" + // DialFailureContext indicates that the context was cancelled or reached it's deadline before + // the dial response was returned. + DialFailureContext DialFailureReason = "context" + // DialFailureEndpoint indicates that the konnectivity-agent was unable to reach the backend endpoint. + DialFailureEndpoint DialFailureReason = "endpoint" + // DialFailureDialClosed indicates that the client received a CloseDial response, indicating the + // connection was closed before the dial could complete. + DialFailureDialClosed DialFailureReason = "dialclosed" + // DialFailureTunnelClosed indicates that the client connection was closed before the dial could + // complete. + DialFailureTunnelClosed DialFailureReason = "tunnelclosed" +) + +type ClientConnectionStatus string + +const ( + // The connection is created but has not yet been dialed. + ClientConnectionStatusCreated ClientConnectionStatus = "created" + // The connection is pending dial response. + ClientConnectionStatusDialing ClientConnectionStatus = "dialing" + // The connection is established. + ClientConnectionStatusOk ClientConnectionStatus = "ok" + // The connection is closing. + ClientConnectionStatusClosing ClientConnectionStatus = "closing" +) + +func newMetrics() *ClientMetrics { + // The denominator (total dials started) for both + // dial_failure_total and dial_duration_seconds is the + // stream_packets_total (common metric), where segment is + // "from_client" and packet_type is "DIAL_REQ". + dialFailures := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: Namespace, + Subsystem: Subsystem, + Name: "dial_failure_total", + Help: "Number of dial failures observed, by reason (example: remote endpoint error)", + }, + []string{ + "reason", + }, + ) + clientConns := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: Namespace, + Subsystem: Subsystem, + Name: "client_connections", + Help: "Number of open client connections, by status (Example: dialing)", + }, + []string{ + "status", + }, + ) + return &ClientMetrics{ + streamPackets: commonmetrics.MakeStreamPacketsTotalMetric(Namespace, Subsystem), + streamErrors: commonmetrics.MakeStreamErrorsTotalMetric(Namespace, Subsystem), + dialFailures: dialFailures, + clientConns: clientConns, + } +} + +// RegisterMetrics registers all metrics with the client application. 
+func (c *ClientMetrics) RegisterMetrics(r prometheus.Registerer) { + c.registerOnce.Do(func() { + r.MustRegister(c.streamPackets) + r.MustRegister(c.streamErrors) + r.MustRegister(c.dialFailures) + r.MustRegister(c.clientConns) + }) +} + +// LegacyRegisterMetrics registers all metrics via MustRegister func. +// TODO: remove this once https://github.com/kubernetes/kubernetes/pull/114293 is available. +func (c *ClientMetrics) LegacyRegisterMetrics(mustRegisterFn func(...prometheus.Collector)) { + c.registerOnce.Do(func() { + mustRegisterFn(c.streamPackets) + mustRegisterFn(c.streamErrors) + mustRegisterFn(c.dialFailures) + mustRegisterFn(c.clientConns) + }) +} + +// Reset resets the metrics. +func (c *ClientMetrics) Reset() { + c.streamPackets.Reset() + c.streamErrors.Reset() + c.dialFailures.Reset() + c.clientConns.Reset() +} + +func (c *ClientMetrics) ObserveDialFailure(reason DialFailureReason) { + c.dialFailures.WithLabelValues(string(reason)).Inc() +} + +func (c *ClientMetrics) GetClientConnectionsMetric() *prometheus.GaugeVec { + return c.clientConns +} + +func (c *ClientMetrics) ObservePacket(segment commonmetrics.Segment, packetType client.PacketType) { + commonmetrics.ObservePacket(c.streamPackets, segment, packetType) +} + +func (c *ClientMetrics) ObserveStreamErrorNoPacket(segment commonmetrics.Segment, err error) { + commonmetrics.ObserveStreamErrorNoPacket(c.streamErrors, segment, err) +} + +func (c *ClientMetrics) ObserveStreamError(segment commonmetrics.Segment, err error, packetType client.PacketType) { + commonmetrics.ObserveStreamError(c.streamErrors, segment, err, packetType) +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics/metrics.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics/metrics.go new file mode 100644 index 000000000000..e8619f472022 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides metric definitions and helpers used +// across konnectivity client, server, and agent. +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/status" + + "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client" +) + +// Segment identifies one of four tunnel segments (e.g. from server to agent). +type Segment string + +const ( + // SegmentFromClient indicates a packet from client to server. + SegmentFromClient Segment = "from_client" + // SegmentToClient indicates a packet from server to client. + SegmentToClient Segment = "to_client" + // SegmentFromAgent indicates a packet from agent to server. + SegmentFromAgent Segment = "from_agent" + // SegmentToAgent indicates a packet from server to agent. 
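The client metrics package above exposes a package-level singleton (re-exported from pkg/client as client.Metrics) whose counters and gauges only appear once the embedding application registers them. A hedged sketch of that wiring, assuming the canonical import paths rather than the vendored github.com mirrors, and using prometheus.DefaultRegisterer as just one possible registry:

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client"
)

func main() {
	// RegisterMetrics is guarded by a sync.Once, so repeated calls are safe.
	client.Metrics.RegisterMetrics(prometheus.DefaultRegisterer)
}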
+ SegmentToAgent Segment = "to_agent" +) + +func MakeStreamPacketsTotalMetric(namespace, subsystem string) *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "stream_packets_total", + Help: "Count of packets processed, by segment and packet type (example: from_client, DIAL_REQ)", + }, + []string{"segment", "packet_type"}, + ) +} + +func MakeStreamErrorsTotalMetric(namespace, subsystem string) *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "stream_errors_total", + Help: "Count of gRPC stream errors, by segment, grpc Code, packet type. (example: from_agent, Code.Unavailable, DIAL_RSP)", + }, + []string{"segment", "code", "packet_type"}, + ) +} + +func ObservePacket(m *prometheus.CounterVec, segment Segment, packetType client.PacketType) { + m.WithLabelValues(string(segment), packetType.String()).Inc() +} + +func ObserveStreamErrorNoPacket(m *prometheus.CounterVec, segment Segment, err error) { + code := status.Code(err) + m.WithLabelValues(string(segment), code.String(), "Unknown").Inc() +} + +func ObserveStreamError(m *prometheus.CounterVec, segment Segment, err error, packetType client.PacketType) { + code := status.Code(err) + m.WithLabelValues(string(segment), code.String(), packetType.String()).Inc() +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index 59a797df4cf7..6af92b448b06 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -1,43 +1,38 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ // Code generated by protoc-gen-go. DO NOT EDIT. 
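The shared helpers above derive the "code" label for stream_errors_total from the gRPC status of the error. A small sketch, assuming the google.golang.org/grpc/status and codes packages already vendored here, of how a transport error maps to that label:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.Unavailable, "transport is closing")
	// ObserveStreamError and ObserveStreamErrorNoPacket use
	// status.Code(err).String() as the "code" label value.
	fmt.Println("code label:", status.Code(err).String()) // Unavailable
}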
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 // source: konnectivity-client/proto/client/client.proto package client import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type PacketType int32 @@ -50,99 +45,156 @@ const ( PacketType_DIAL_CLS PacketType = 5 ) -var PacketType_name = map[int32]string{ - 0: "DIAL_REQ", - 1: "DIAL_RSP", - 2: "CLOSE_REQ", - 3: "CLOSE_RSP", - 4: "DATA", - 5: "DIAL_CLS", -} +// Enum value maps for PacketType. +var ( + PacketType_name = map[int32]string{ + 0: "DIAL_REQ", + 1: "DIAL_RSP", + 2: "CLOSE_REQ", + 3: "CLOSE_RSP", + 4: "DATA", + 5: "DIAL_CLS", + } + PacketType_value = map[string]int32{ + "DIAL_REQ": 0, + "DIAL_RSP": 1, + "CLOSE_REQ": 2, + "CLOSE_RSP": 3, + "DATA": 4, + "DIAL_CLS": 5, + } +) -var PacketType_value = map[string]int32{ - "DIAL_REQ": 0, - "DIAL_RSP": 1, - "CLOSE_REQ": 2, - "CLOSE_RSP": 3, - "DATA": 4, - "DIAL_CLS": 5, +func (x PacketType) Enum() *PacketType { + p := new(PacketType) + *p = x + return p } func (x PacketType) String() string { - return proto.EnumName(PacketType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (PacketType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{0} +func (PacketType) Descriptor() protoreflect.EnumDescriptor { + return file_konnectivity_client_proto_client_client_proto_enumTypes[0].Descriptor() } -type Error int32 - -const ( - Error_EOF Error = 0 -) - -var Error_name = map[int32]string{ - 0: "EOF", -} - -var Error_value = map[string]int32{ - "EOF": 0, +func (PacketType) Type() protoreflect.EnumType { + return &file_konnectivity_client_proto_client_client_proto_enumTypes[0] } -func (x Error) String() string { - return proto.EnumName(Error_name, int32(x)) +func (x PacketType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (Error) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{1} +// Deprecated: Use PacketType.Descriptor instead. 
+func (PacketType) EnumDescriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{0} } type Packet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=PacketType" json:"type,omitempty"` - // Types that are valid to be assigned to Payload: + // Types that are assignable to Payload: + // // *Packet_DialRequest // *Packet_DialResponse // *Packet_Data // *Packet_CloseRequest // *Packet_CloseResponse // *Packet_CloseDial - Payload isPacket_Payload `protobuf_oneof:"payload"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Payload isPacket_Payload `protobuf_oneof:"payload"` +} + +func (x *Packet) Reset() { + *x = Packet{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Packet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Packet) ProtoMessage() {} + +func (x *Packet) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Packet) Reset() { *m = Packet{} } -func (m *Packet) String() string { return proto.CompactTextString(m) } -func (*Packet) ProtoMessage() {} +// Deprecated: Use Packet.ProtoReflect.Descriptor instead. func (*Packet) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{0} + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{0} } -func (m *Packet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Packet.Unmarshal(m, b) +func (x *Packet) GetType() PacketType { + if x != nil { + return x.Type + } + return PacketType_DIAL_REQ } -func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Packet.Marshal(b, m, deterministic) + +func (m *Packet) GetPayload() isPacket_Payload { + if m != nil { + return m.Payload + } + return nil } -func (m *Packet) XXX_Merge(src proto.Message) { - xxx_messageInfo_Packet.Merge(m, src) + +func (x *Packet) GetDialRequest() *DialRequest { + if x, ok := x.GetPayload().(*Packet_DialRequest); ok { + return x.DialRequest + } + return nil } -func (m *Packet) XXX_Size() int { - return xxx_messageInfo_Packet.Size(m) + +func (x *Packet) GetDialResponse() *DialResponse { + if x, ok := x.GetPayload().(*Packet_DialResponse); ok { + return x.DialResponse + } + return nil } -func (m *Packet) XXX_DiscardUnknown() { - xxx_messageInfo_Packet.DiscardUnknown(m) + +func (x *Packet) GetData() *Data { + if x, ok := x.GetPayload().(*Packet_Data); ok { + return x.Data + } + return nil } -var xxx_messageInfo_Packet proto.InternalMessageInfo +func (x *Packet) GetCloseRequest() *CloseRequest { + if x, ok := x.GetPayload().(*Packet_CloseRequest); ok { + return x.CloseRequest + } + return nil +} -func (m *Packet) GetType() PacketType { - if m != nil { - return m.Type +func (x *Packet) GetCloseResponse() *CloseResponse { + if x, ok := x.GetPayload().(*Packet_CloseResponse); ok { + return x.CloseResponse } - return PacketType_DIAL_REQ + return nil +} + +func (x *Packet) GetCloseDial() *CloseDial { + if 
x, ok := x.GetPayload().(*Packet_CloseDial); ok { + return x.CloseDial + } + return nil } type isPacket_Payload interface { @@ -185,530 +237,584 @@ func (*Packet_CloseResponse) isPacket_Payload() {} func (*Packet_CloseDial) isPacket_Payload() {} -func (m *Packet) GetPayload() isPacket_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *Packet) GetDialRequest() *DialRequest { - if x, ok := m.GetPayload().(*Packet_DialRequest); ok { - return x.DialRequest - } - return nil -} - -func (m *Packet) GetDialResponse() *DialResponse { - if x, ok := m.GetPayload().(*Packet_DialResponse); ok { - return x.DialResponse - } - return nil -} +type DialRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Packet) GetData() *Data { - if x, ok := m.GetPayload().(*Packet_Data); ok { - return x.Data - } - return nil + // tcp or udp? + Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` + // node:port + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // random id for client, maybe should be longer + Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` } -func (m *Packet) GetCloseRequest() *CloseRequest { - if x, ok := m.GetPayload().(*Packet_CloseRequest); ok { - return x.CloseRequest +func (x *DialRequest) Reset() { + *x = DialRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *Packet) GetCloseResponse() *CloseResponse { - if x, ok := m.GetPayload().(*Packet_CloseResponse); ok { - return x.CloseResponse - } - return nil +func (x *DialRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Packet) GetCloseDial() *CloseDial { - if x, ok := m.GetPayload().(*Packet_CloseDial); ok { - return x.CloseDial - } - return nil -} +func (*DialRequest) ProtoMessage() {} -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Packet) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Packet_DialRequest)(nil), - (*Packet_DialResponse)(nil), - (*Packet_Data)(nil), - (*Packet_CloseRequest)(nil), - (*Packet_CloseResponse)(nil), - (*Packet_CloseDial)(nil), +func (x *DialRequest) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) } -type DialRequest struct { - // tcp or udp? - Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` - // node:port - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // random id for client, maybe should be longer - Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DialRequest) Reset() { *m = DialRequest{} } -func (m *DialRequest) String() string { return proto.CompactTextString(m) } -func (*DialRequest) ProtoMessage() {} +// Deprecated: Use DialRequest.ProtoReflect.Descriptor instead. 
func (*DialRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{1} -} - -func (m *DialRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DialRequest.Unmarshal(m, b) -} -func (m *DialRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DialRequest.Marshal(b, m, deterministic) -} -func (m *DialRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DialRequest.Merge(m, src) -} -func (m *DialRequest) XXX_Size() int { - return xxx_messageInfo_DialRequest.Size(m) -} -func (m *DialRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DialRequest.DiscardUnknown(m) + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_DialRequest proto.InternalMessageInfo - -func (m *DialRequest) GetProtocol() string { - if m != nil { - return m.Protocol +func (x *DialRequest) GetProtocol() string { + if x != nil { + return x.Protocol } return "" } -func (m *DialRequest) GetAddress() string { - if m != nil { - return m.Address +func (x *DialRequest) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *DialRequest) GetRandom() int64 { - if m != nil { - return m.Random +func (x *DialRequest) GetRandom() int64 { + if x != nil { + return x.Random } return 0 } type DialResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // error failed reason; enum? Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` // connectID indicates the identifier of the connection ConnectID int64 `protobuf:"varint,2,opt,name=connectID,proto3" json:"connectID,omitempty"` // random copied from DialRequest - Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Random int64 `protobuf:"varint,3,opt,name=random,proto3" json:"random,omitempty"` } -func (m *DialResponse) Reset() { *m = DialResponse{} } -func (m *DialResponse) String() string { return proto.CompactTextString(m) } -func (*DialResponse) ProtoMessage() {} -func (*DialResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{2} +func (x *DialResponse) Reset() { + *x = DialResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DialResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DialResponse.Unmarshal(m, b) -} -func (m *DialResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DialResponse.Marshal(b, m, deterministic) -} -func (m *DialResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DialResponse.Merge(m, src) -} -func (m *DialResponse) XXX_Size() int { - return xxx_messageInfo_DialResponse.Size(m) +func (x *DialResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DialResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DialResponse.DiscardUnknown(m) + +func (*DialResponse) ProtoMessage() {} + +func (x *DialResponse) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_DialResponse proto.InternalMessageInfo +// Deprecated: Use DialResponse.ProtoReflect.Descriptor instead. +func (*DialResponse) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{2} +} -func (m *DialResponse) GetError() string { - if m != nil { - return m.Error +func (x *DialResponse) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *DialResponse) GetConnectID() int64 { - if m != nil { - return m.ConnectID +func (x *DialResponse) GetConnectID() int64 { + if x != nil { + return x.ConnectID } return 0 } -func (m *DialResponse) GetRandom() int64 { - if m != nil { - return m.Random +func (x *DialResponse) GetRandom() int64 { + if x != nil { + return x.Random } return 0 } type CloseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // connectID of the stream to close - ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` } -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{3} +func (x *CloseRequest) Reset() { + *x = CloseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (m *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(m, src) +func (x *CloseRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseRequest.DiscardUnknown(m) + +func (*CloseRequest) ProtoMessage() {} + +func (x *CloseRequest) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo +// Deprecated: Use CloseRequest.ProtoReflect.Descriptor instead. 
+func (*CloseRequest) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{3} +} -func (m *CloseRequest) GetConnectID() int64 { - if m != nil { - return m.ConnectID +func (x *CloseRequest) GetConnectID() int64 { + if x != nil { + return x.ConnectID } return 0 } type CloseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // error message Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` // connectID indicates the identifier of the connection - ConnectID int64 `protobuf:"varint,2,opt,name=connectID,proto3" json:"connectID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ConnectID int64 `protobuf:"varint,2,opt,name=connectID,proto3" json:"connectID,omitempty"` } -func (m *CloseResponse) Reset() { *m = CloseResponse{} } -func (m *CloseResponse) String() string { return proto.CompactTextString(m) } -func (*CloseResponse) ProtoMessage() {} -func (*CloseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{4} +func (x *CloseResponse) Reset() { + *x = CloseResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CloseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseResponse.Unmarshal(m, b) +func (x *CloseResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseResponse.Marshal(b, m, deterministic) -} -func (m *CloseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseResponse.Merge(m, src) -} -func (m *CloseResponse) XXX_Size() int { - return xxx_messageInfo_CloseResponse.Size(m) -} -func (m *CloseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CloseResponse.DiscardUnknown(m) + +func (*CloseResponse) ProtoMessage() {} + +func (x *CloseResponse) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CloseResponse proto.InternalMessageInfo +// Deprecated: Use CloseResponse.ProtoReflect.Descriptor instead. 
+func (*CloseResponse) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{4} +} -func (m *CloseResponse) GetError() string { - if m != nil { - return m.Error +func (x *CloseResponse) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *CloseResponse) GetConnectID() int64 { - if m != nil { - return m.ConnectID +func (x *CloseResponse) GetConnectID() int64 { + if x != nil { + return x.ConnectID } return 0 } type CloseDial struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // random id of the DialRequest - Random int64 `protobuf:"varint,1,opt,name=random,proto3" json:"random,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Random int64 `protobuf:"varint,1,opt,name=random,proto3" json:"random,omitempty"` } -func (m *CloseDial) Reset() { *m = CloseDial{} } -func (m *CloseDial) String() string { return proto.CompactTextString(m) } -func (*CloseDial) ProtoMessage() {} -func (*CloseDial) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{5} +func (x *CloseDial) Reset() { + *x = CloseDial{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CloseDial) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseDial.Unmarshal(m, b) -} -func (m *CloseDial) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseDial.Marshal(b, m, deterministic) +func (x *CloseDial) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CloseDial) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseDial.Merge(m, src) -} -func (m *CloseDial) XXX_Size() int { - return xxx_messageInfo_CloseDial.Size(m) -} -func (m *CloseDial) XXX_DiscardUnknown() { - xxx_messageInfo_CloseDial.DiscardUnknown(m) + +func (*CloseDial) ProtoMessage() {} + +func (x *CloseDial) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CloseDial proto.InternalMessageInfo +// Deprecated: Use CloseDial.ProtoReflect.Descriptor instead. 
+func (*CloseDial) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{5} +} -func (m *CloseDial) GetRandom() int64 { - if m != nil { - return m.Random +func (x *CloseDial) GetRandom() int64 { + if x != nil { + return x.Random } return 0 } type Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // connectID to connect to ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` // error message if error happens Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` // stream data - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Data) Reset() { *m = Data{} } -func (m *Data) String() string { return proto.CompactTextString(m) } -func (*Data) ProtoMessage() {} -func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{6} +func (x *Data) Reset() { + *x = Data{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Data) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Data.Unmarshal(m, b) +func (x *Data) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Data.Marshal(b, m, deterministic) -} -func (m *Data) XXX_Merge(src proto.Message) { - xxx_messageInfo_Data.Merge(m, src) -} -func (m *Data) XXX_Size() int { - return xxx_messageInfo_Data.Size(m) -} -func (m *Data) XXX_DiscardUnknown() { - xxx_messageInfo_Data.DiscardUnknown(m) + +func (*Data) ProtoMessage() {} + +func (x *Data) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Data proto.InternalMessageInfo +// Deprecated: Use Data.ProtoReflect.Descriptor instead. 
+func (*Data) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{6} +} -func (m *Data) GetConnectID() int64 { - if m != nil { - return m.ConnectID +func (x *Data) GetConnectID() int64 { + if x != nil { + return x.ConnectID } return 0 } -func (m *Data) GetError() string { - if m != nil { - return m.Error +func (x *Data) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *Data) GetData() []byte { - if m != nil { - return m.Data +func (x *Data) GetData() []byte { + if x != nil { + return x.Data } return nil } -func init() { - proto.RegisterEnum("PacketType", PacketType_name, PacketType_value) - proto.RegisterEnum("Error", Error_name, Error_value) - proto.RegisterType((*Packet)(nil), "Packet") - proto.RegisterType((*DialRequest)(nil), "DialRequest") - proto.RegisterType((*DialResponse)(nil), "DialResponse") - proto.RegisterType((*CloseRequest)(nil), "CloseRequest") - proto.RegisterType((*CloseResponse)(nil), "CloseResponse") - proto.RegisterType((*CloseDial)(nil), "CloseDial") - proto.RegisterType((*Data)(nil), "Data") -} - -func init() { - proto.RegisterFile("konnectivity-client/proto/client/client.proto", fileDescriptor_fec4258d9ecd175d) -} - -var fileDescriptor_fec4258d9ecd175d = []byte{ - // 505 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x51, 0x8b, 0xd3, 0x40, - 0x18, 0x4c, 0xda, 0xa4, 0x6d, 0xbe, 0xa6, 0x47, 0x58, 0x44, 0xc2, 0x29, 0xdc, 0x11, 0x5f, 0x4a, - 0xb1, 0xe9, 0xd1, 0x03, 0xf1, 0xb5, 0xd7, 0xf4, 0xe8, 0x41, 0xf1, 0xea, 0xf6, 0x9e, 0x4e, 0x50, - 0xd6, 0x64, 0x91, 0xd0, 0x98, 0x8d, 0xbb, 0x6b, 0x35, 0x3f, 0xd3, 0x7f, 0x24, 0xd9, 0xa4, 0x4d, - 0x22, 0xa8, 0x70, 0x4f, 0xed, 0xcc, 0x7e, 0x33, 0x3b, 0x19, 0xbe, 0x85, 0xe9, 0x9e, 0xa5, 0x29, - 0x0d, 0x65, 0x7c, 0x88, 0x65, 0x3e, 0x0d, 0x93, 0x98, 0xa6, 0x72, 0x96, 0x71, 0x26, 0xd9, 0xac, - 0x02, 0xe5, 0x8f, 0xaf, 0x38, 0xef, 0x57, 0x07, 0x7a, 0x5b, 0x12, 0xee, 0xa9, 0x44, 0x17, 0x60, - 0xc8, 0x3c, 0xa3, 0xae, 0x7e, 0xa9, 0x8f, 0xcf, 0xe6, 0x43, 0xbf, 0xa4, 0x1f, 0xf2, 0x8c, 0x62, - 0x75, 0x80, 0xae, 0x60, 0x18, 0xc5, 0x24, 0xc1, 0xf4, 0xdb, 0x77, 0x2a, 0xa4, 0xdb, 0xb9, 0xd4, - 0xc7, 0xc3, 0xb9, 0xed, 0x07, 0x35, 0xb7, 0xd6, 0x70, 0x73, 0x04, 0x5d, 0x83, 0x5d, 0x42, 0x91, - 0xb1, 0x54, 0x50, 0xb7, 0xab, 0x24, 0xa3, 0x4a, 0x52, 0x92, 0x6b, 0x0d, 0xb7, 0x86, 0xd0, 0x0b, - 0x30, 0x22, 0x22, 0x89, 0x6b, 0xa8, 0x61, 0xd3, 0x0f, 0x88, 0x24, 0x6b, 0x0d, 0x2b, 0xb2, 0x70, - 0x0c, 0x13, 0x26, 0xe8, 0x31, 0x84, 0x59, 0x39, 0x2e, 0x1b, 0x64, 0xe1, 0xd8, 0x1c, 0x42, 0x6f, - 0x60, 0x54, 0xe1, 0x2a, 0x47, 0x4f, 0xa9, 0xce, 0x8e, 0xaa, 0x53, 0x90, 0xf6, 0x18, 0x9a, 0x80, - 0xa5, 0x88, 0x22, 0xae, 0xdb, 0x57, 0x1a, 0x28, 0x35, 0x05, 0xb3, 0xd6, 0x70, 0x7d, 0x7c, 0x63, - 0x41, 0x3f, 0x23, 0x79, 0xc2, 0x48, 0xe4, 0x7d, 0x80, 0x61, 0xa3, 0x13, 0x74, 0x0e, 0x03, 0xd5, - 0x75, 0xc8, 0x12, 0xd5, 0xad, 0x85, 0x4f, 0x18, 0xb9, 0xd0, 0x27, 0x51, 0xc4, 0xa9, 0x10, 0xaa, - 0x4e, 0x0b, 0x1f, 0x21, 0x7a, 0x0e, 0x3d, 0x4e, 0xd2, 0x88, 0x7d, 0x55, 0xa5, 0x75, 0x71, 0x85, - 0xbc, 0x47, 0xb0, 0x9b, 0xed, 0xa1, 0x67, 0x60, 0x52, 0xce, 0x19, 0xaf, 0xac, 0x4b, 0x80, 0x5e, - 0x82, 0x15, 0x96, 0x7b, 0x70, 0x17, 0x28, 0xe7, 0x2e, 0xae, 0x89, 0xbf, 0x7a, 0xbf, 0x06, 0xbb, - 0xd9, 0x63, 0xdb, 0x45, 0xff, 0xc3, 0xc5, 0x5b, 0xc2, 0xa8, 0xd5, 0xdf, 0x53, 0xa2, 0x78, 0xaf, - 0xc0, 0x3a, 0x15, 0xda, 0xc8, 0xa5, 0xb7, 0x72, 0xbd, 0x03, 0xa3, 0x58, 0x82, 0x7f, 0xe7, 0xa9, - 0xaf, 0xef, 0x34, 0xaf, 0x47, 0xd5, 0x36, 0x15, 0x5f, 
0x6a, 0x97, 0x4b, 0x34, 0xf9, 0x08, 0x50, - 0x2f, 0x37, 0xb2, 0x61, 0x10, 0xdc, 0x2d, 0x36, 0x9f, 0xf0, 0xea, 0xbd, 0xa3, 0xd5, 0x68, 0xb7, - 0x75, 0x74, 0x34, 0x02, 0x6b, 0xb9, 0xb9, 0xdf, 0xad, 0xd4, 0x61, 0xa7, 0x01, 0x77, 0x5b, 0xa7, - 0x8b, 0x06, 0x60, 0x04, 0x8b, 0x87, 0x85, 0x63, 0x9c, 0x54, 0xcb, 0xcd, 0xce, 0x31, 0x27, 0x0e, - 0x98, 0x2b, 0x75, 0x79, 0x1f, 0xba, 0xab, 0xfb, 0x5b, 0x47, 0x9b, 0xcf, 0xc0, 0xde, 0x72, 0xf6, - 0x33, 0xdf, 0x51, 0x7e, 0x88, 0x43, 0x8a, 0x2e, 0xc0, 0x54, 0x18, 0xf5, 0xab, 0x67, 0x76, 0x7e, - 0xfc, 0xe3, 0x69, 0x63, 0xfd, 0x4a, 0xbf, 0xb9, 0x7d, 0x0c, 0x44, 0xfc, 0x45, 0xf8, 0xfb, 0xb7, - 0xc2, 0x8f, 0xd9, 0x8c, 0x64, 0xb1, 0xa0, 0xfc, 0x40, 0xf9, 0x34, 0xa5, 0xf2, 0x07, 0xe3, 0xfb, - 0x69, 0x56, 0xc8, 0x67, 0xff, 0x7b, 0xec, 0x9f, 0x7b, 0x0a, 0x5d, 0xff, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x38, 0x1b, 0xf6, 0x4f, 0x17, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ProxyServiceClient is the client API for ProxyService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ProxyServiceClient interface { - Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) -} - -type proxyServiceClient struct { - cc *grpc.ClientConn -} - -func NewProxyServiceClient(cc *grpc.ClientConn) ProxyServiceClient { - return &proxyServiceClient{cc} -} +var File_konnectivity_client_proto_client_client_proto protoreflect.FileDescriptor + +var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x6b, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xd1, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, + 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x05, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x33, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 
0x74, 0x12, 0x36, 0x0a, 0x0d, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x09, + 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0a, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x09, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x22, 0x5b, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, + 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x2c, 0x0a, 0x0c, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x22, 0x43, 0x0a, 0x0d, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x22, + 0x23, 0x0a, 0x09, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, + 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x4e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x2a, 0x5e, 0x0a, 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 
0x5f, 0x52, 0x45, 0x51, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x44, 0x41, 0x54, 0x41, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x43, + 0x4c, 0x53, 0x10, 0x05, 0x32, 0x2f, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x07, 0x2e, + 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x07, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x73, 0x69, 0x67, 0x73, 0x2e, 0x6b, 0x38, + 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2d, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x6b, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_konnectivity_client_proto_client_client_proto_rawDescOnce sync.Once + file_konnectivity_client_proto_client_client_proto_rawDescData = file_konnectivity_client_proto_client_client_proto_rawDesc +) -func (c *proxyServiceClient) Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) { - stream, err := c.cc.NewStream(ctx, &_ProxyService_serviceDesc.Streams[0], "/ProxyService/Proxy", opts...) - if err != nil { - return nil, err +func file_konnectivity_client_proto_client_client_proto_rawDescGZIP() []byte { + file_konnectivity_client_proto_client_client_proto_rawDescOnce.Do(func() { + file_konnectivity_client_proto_client_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_konnectivity_client_proto_client_client_proto_rawDescData) + }) + return file_konnectivity_client_proto_client_client_proto_rawDescData +} + +var file_konnectivity_client_proto_client_client_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_konnectivity_client_proto_client_client_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_konnectivity_client_proto_client_client_proto_goTypes = []interface{}{ + (PacketType)(0), // 0: PacketType + (*Packet)(nil), // 1: Packet + (*DialRequest)(nil), // 2: DialRequest + (*DialResponse)(nil), // 3: DialResponse + (*CloseRequest)(nil), // 4: CloseRequest + (*CloseResponse)(nil), // 5: CloseResponse + (*CloseDial)(nil), // 6: CloseDial + (*Data)(nil), // 7: Data +} +var file_konnectivity_client_proto_client_client_proto_depIdxs = []int32{ + 0, // 0: Packet.type:type_name -> PacketType + 2, // 1: Packet.dialRequest:type_name -> DialRequest + 3, // 2: Packet.dialResponse:type_name -> DialResponse + 7, // 3: Packet.data:type_name -> Data + 4, // 4: Packet.closeRequest:type_name -> CloseRequest + 5, // 5: Packet.closeResponse:type_name -> CloseResponse + 6, // 6: Packet.closeDial:type_name -> CloseDial + 1, // 7: ProxyService.Proxy:input_type -> Packet + 1, // 8: ProxyService.Proxy:output_type -> Packet + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_konnectivity_client_proto_client_client_proto_init() } +func file_konnectivity_client_proto_client_client_proto_init() { + if File_konnectivity_client_proto_client_client_proto != nil { + return } - 
x := &proxyServiceProxyClient{stream} - return x, nil -} - -type ProxyService_ProxyClient interface { - Send(*Packet) error - Recv() (*Packet, error) - grpc.ClientStream -} - -type proxyServiceProxyClient struct { - grpc.ClientStream -} - -func (x *proxyServiceProxyClient) Send(m *Packet) error { - return x.ClientStream.SendMsg(m) -} - -func (x *proxyServiceProxyClient) Recv() (*Packet, error) { - m := new(Packet) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err + if !protoimpl.UnsafeEnabled { + file_konnectivity_client_proto_client_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Packet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DialRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DialResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseDial); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - return m, nil -} - -// ProxyServiceServer is the server API for ProxyService service. -type ProxyServiceServer interface { - Proxy(ProxyService_ProxyServer) error -} - -// UnimplementedProxyServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedProxyServiceServer struct { -} - -func (*UnimplementedProxyServiceServer) Proxy(srv ProxyService_ProxyServer) error { - return status.Errorf(codes.Unimplemented, "method Proxy not implemented") -} - -func RegisterProxyServiceServer(s *grpc.Server, srv ProxyServiceServer) { - s.RegisterService(&_ProxyService_serviceDesc, srv) -} - -func _ProxyService_Proxy_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ProxyServiceServer).Proxy(&proxyServiceProxyServer{stream}) -} - -type ProxyService_ProxyServer interface { - Send(*Packet) error - Recv() (*Packet, error) - grpc.ServerStream -} - -type proxyServiceProxyServer struct { - grpc.ServerStream -} - -func (x *proxyServiceProxyServer) Send(m *Packet) error { - return x.ServerStream.SendMsg(m) -} - -func (x *proxyServiceProxyServer) Recv() (*Packet, error) { - m := new(Packet) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err + file_konnectivity_client_proto_client_client_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Packet_DialRequest)(nil), + (*Packet_DialResponse)(nil), + (*Packet_Data)(nil), + (*Packet_CloseRequest)(nil), + (*Packet_CloseResponse)(nil), + (*Packet_CloseDial)(nil), } - return m, nil -} - -var _ProxyService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "ProxyService", - HandlerType: (*ProxyServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Proxy", - Handler: _ProxyService_Proxy_Handler, - ServerStreams: true, - ClientStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_konnectivity_client_proto_client_client_proto_rawDesc, + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "konnectivity-client/proto/client/client.proto", + GoTypes: file_konnectivity_client_proto_client_client_proto_goTypes, + DependencyIndexes: file_konnectivity_client_proto_client_client_proto_depIdxs, + EnumInfos: file_konnectivity_client_proto_client_client_proto_enumTypes, + MessageInfos: file_konnectivity_client_proto_client_client_proto_msgTypes, + }.Build() + File_konnectivity_client_proto_client_client_proto = out.File + file_konnectivity_client_proto_client_client_proto_rawDesc = nil + file_konnectivity_client_proto_client_client_proto_goTypes = nil + file_konnectivity_client_proto_client_client_proto_depIdxs = nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto index 0c4cff8cd241..811278e62da9 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto @@ -32,11 +32,6 @@ enum PacketType { DIAL_CLS = 5; } -enum Error { - EOF = 0; - // ... 
-} - message Packet { PacketType type = 1; diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go new file mode 100644 index 000000000000..b8d07fe55a20 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go @@ -0,0 +1,150 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: konnectivity-client/proto/client/client.proto + +package client + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ProxyServiceClient is the client API for ProxyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProxyServiceClient interface { + Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) +} + +type proxyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewProxyServiceClient(cc grpc.ClientConnInterface) ProxyServiceClient { + return &proxyServiceClient{cc} +} + +func (c *proxyServiceClient) Proxy(ctx context.Context, opts ...grpc.CallOption) (ProxyService_ProxyClient, error) { + stream, err := c.cc.NewStream(ctx, &ProxyService_ServiceDesc.Streams[0], "/ProxyService/Proxy", opts...) + if err != nil { + return nil, err + } + x := &proxyServiceProxyClient{stream} + return x, nil +} + +type ProxyService_ProxyClient interface { + Send(*Packet) error + Recv() (*Packet, error) + grpc.ClientStream +} + +type proxyServiceProxyClient struct { + grpc.ClientStream +} + +func (x *proxyServiceProxyClient) Send(m *Packet) error { + return x.ClientStream.SendMsg(m) +} + +func (x *proxyServiceProxyClient) Recv() (*Packet, error) { + m := new(Packet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ProxyServiceServer is the server API for ProxyService service. +// All implementations should embed UnimplementedProxyServiceServer +// for forward compatibility +type ProxyServiceServer interface { + Proxy(ProxyService_ProxyServer) error +} + +// UnimplementedProxyServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedProxyServiceServer struct { +} + +func (UnimplementedProxyServiceServer) Proxy(ProxyService_ProxyServer) error { + return status.Errorf(codes.Unimplemented, "method Proxy not implemented") +} + +// UnsafeProxyServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProxyServiceServer will +// result in compilation errors. +type UnsafeProxyServiceServer interface { + mustEmbedUnimplementedProxyServiceServer() +} + +func RegisterProxyServiceServer(s grpc.ServiceRegistrar, srv ProxyServiceServer) { + s.RegisterService(&ProxyService_ServiceDesc, srv) +} + +func _ProxyService_Proxy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ProxyServiceServer).Proxy(&proxyServiceProxyServer{stream}) +} + +type ProxyService_ProxyServer interface { + Send(*Packet) error + Recv() (*Packet, error) + grpc.ServerStream +} + +type proxyServiceProxyServer struct { + grpc.ServerStream +} + +func (x *proxyServiceProxyServer) Send(m *Packet) error { + return x.ServerStream.SendMsg(m) +} + +func (x *proxyServiceProxyServer) Recv() (*Packet, error) { + m := new(Packet) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ProxyService_ServiceDesc is the grpc.ServiceDesc for ProxyService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ProxyService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "ProxyService", + HandlerType: (*ProxyServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Proxy", + Handler: _ProxyService_Proxy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "konnectivity-client/proto/client/client.proto", +} diff --git a/cluster-autoscaler/version/version.go b/cluster-autoscaler/version/version.go index ff21434564f0..40f62afc4824 100644 --- a/cluster-autoscaler/version/version.go +++ b/cluster-autoscaler/version/version.go @@ -17,4 +17,4 @@ limitations under the License. package version // ClusterAutoscalerVersion contains version of CA. -const ClusterAutoscalerVersion = "1.26.0" +const ClusterAutoscalerVersion = "1.27.0-alpha.1" From 32c6bbc0c9eced9c756525e15dfb3cfb00a52226 Mon Sep 17 00:00:00 2001 From: Jayant Jain Date: Wed, 25 Jan 2023 16:01:10 +0000 Subject: [PATCH 35/35] return argument of framework.RunFilterPlugins has changed from a map[string]*Status to *Status. The func returns after the first failed plugin. Augmented schedulerbased.go to reflect this change. 
--- .../predicatechecker/schedulerbased.go | 41 ++++++++----------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go index a7da3e584030..e6c486f82f68 100644 --- a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go +++ b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go @@ -126,15 +126,8 @@ func (p *SchedulerBasedPredicateChecker) FitsAnyNodeMatching(clusterSnapshot clu continue } - filterStatuses := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) - ok := true - for _, filterStatus := range filterStatuses { - if !filterStatus.IsSuccess() { - ok = false - break - } - } - if ok { + filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) + if filterStatus.IsSuccess() { p.lastIndex = (p.lastIndex + i + 1) % len(nodeInfosList) return nodeInfo.Node().Name, nil } @@ -167,24 +160,26 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot cluster emptyString) } - filterStatuses := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) - for filterName, filterStatus := range filterStatuses { - if !filterStatus.IsSuccess() { - if filterStatus.IsUnschedulable() { - return NewPredicateError( - NotSchedulablePredicateError, - filterName, - filterStatus.Message(), - filterStatus.Reasons(), - p.buildDebugInfo(filterName, nodeInfo)) - } + filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo) + + if !filterStatus.IsSuccess() { + filterName := filterStatus.FailedPlugin() + filterMessage := filterStatus.Message() + filterReasons := filterStatus.Reasons() + if filterStatus.IsUnschedulable() { return NewPredicateError( - InternalPredicateError, + NotSchedulablePredicateError, filterName, - filterStatus.Message(), - filterStatus.Reasons(), + filterMessage, + filterReasons, p.buildDebugInfo(filterName, nodeInfo)) } + return NewPredicateError( + InternalPredicateError, + filterName, + filterMessage, + filterReasons, + p.buildDebugInfo(filterName, nodeInfo)) } return nil }
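
Editor's note — a minimal, hypothetical sketch of the calling convention the commit above describes: RunFilterPlugins now returns a single *framework.Status (stopping at the first failed plugin) instead of a map keyed by plugin name, so callers branch on that one status. The helper name classifyFilterStatus, the plugin name "TaintToleration", and the error message are illustrative only; the sketch assumes the scheduler framework package vendored at k8s.io/kubernetes/pkg/scheduler/framework (the same package schedulerbased.go imports), whose NewStatus and SetFailedPlugin helpers are used here to build example statuses, and it relies only on the Status methods the patch itself uses (IsSuccess, IsUnschedulable, FailedPlugin, Message).

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// classifyFilterStatus mirrors the single-Status handling in the patch above:
// instead of ranging over a map of per-plugin statuses, the caller inspects
// the one status describing the first failing filter plugin.
func classifyFilterStatus(status *framework.Status) string {
	if status.IsSuccess() {
		return "pod fits the node"
	}
	if status.IsUnschedulable() {
		// Filter rejected the pod for scheduling reasons
		// (corresponds to NotSchedulablePredicateError above).
		return fmt.Sprintf("unschedulable: plugin %q: %s", status.FailedPlugin(), status.Message())
	}
	// Any other non-success code is treated as an internal error
	// (corresponds to InternalPredicateError above).
	return fmt.Sprintf("internal error: plugin %q: %s", status.FailedPlugin(), status.Message())
}

func main() {
	// Hypothetical statuses, built with the framework's own constructors.
	rejected := framework.NewStatus(framework.Unschedulable, "node(s) had untolerated taint")
	rejected.SetFailedPlugin("TaintToleration")

	fmt.Println(classifyFilterStatus(rejected))
	fmt.Println(classifyFilterStatus(framework.NewStatus(framework.Success)))
}

A consequence of the new return type, visible in the CheckPredicates hunk above, is that per-plugin detail is only available for the first failing plugin, which is why the code now reads FailedPlugin(), Message(), and Reasons() off the single status rather than iterating over all filter results.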